VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 42712

Last change on this file since 42712 was 42601, checked in by vboxsync, 13 years ago

REM: Initial changes to make it work (seemingly) with MinGW-w64.

  • Property svn:eol-style set to native
File size: 196.4 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
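/* Note: parity_table[v] is CC_P exactly when the byte v contains an even
 * number of set bits, which is the x86 PF definition. A minimal sketch of
 * how the lazy flag code presumably consumes it (illustrative only,
 * last_result is a placeholder name):
 */
#if 0
    pf = parity_table[(uint8_t)last_result];    /* yields 0 or CC_P */
#endif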
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
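/* RCL/RCR rotate through CF, so a 16-bit operand effectively has 17 bits
 * and an 8-bit operand 9 bits; these tables reduce the masked 5-bit shift
 * count modulo 17 and modulo 9 respectively. */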
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
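/* FPU constants in the order pushed by FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2,
 * FLDL2E and FLDL2T; presumably indexed by the FPU constant-load helpers
 * further down in this file. */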
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
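/* The translator presumably emits helper_lock()/helper_unlock() around
 * instructions carrying a LOCK prefix; with a single global spinlock this
 * merely serializes them, hence the "broken thread support" note above. */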
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
210 * @param e2 The 2nd selector DWORD.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
224/* return non-zero on error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232#ifdef VBOX
233 /* Trying to load a selector with RPL=1 while at CPL=0 (raw ring-0 mode)? */
234 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
235 {
236 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
237 selector = selector & 0xfffc;
238 }
239#endif /* VBOX */
240
241 if (selector & 0x4)
242 dt = &env->ldt;
243 else
244 dt = &env->gdt;
245 index = selector & ~7;
246 if ((index + 7) > dt->limit)
247 return -1;
248 ptr = dt->base + index;
249 *e1_ptr = ldl_kernel(ptr);
250 *e2_ptr = ldl_kernel(ptr + 4);
251 return 0;
252}
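/* Selector layout: bit 2 (0x4) is the table indicator (LDT vs. GDT) and
 * bits 3..15 form the descriptor index, so (selector & ~7) is the byte
 * offset of the 8-byte descriptor; e1/e2 receive its low and high dwords. */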
253
254static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
255{
256 unsigned int limit;
257 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
258 if (e2 & DESC_G_MASK)
259 limit = (limit << 12) | 0xfff;
260 return limit;
261}
262
263static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
264{
265 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
266}
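/* Descriptor packing recap: the limit is e1[15:0] | e2[19:16], counted in
 * 4K pages when the granularity bit is set (hence the << 12 | 0xfff); the
 * base is assembled from e1[31:16], e2[7:0] and e2[31:24]. */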
267
268static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
269{
270 sc->base = get_seg_base(e1, e2);
271 sc->limit = get_seg_limit(e1, e2);
272 sc->flags = e2;
273#ifdef VBOX
274 sc->newselector = 0;
275 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
276#endif
277}
278
279/* init the segment cache in vm86 mode. */
280static inline void load_seg_vm(int seg, int selector)
281{
282 selector &= 0xffff;
283#ifdef VBOX
284 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
285 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
286 flags |= (3 << DESC_DPL_SHIFT);
287
288 cpu_x86_load_seg_cache(env, seg, selector,
289 (selector << 4), 0xffff, flags);
290#else /* VBOX */
291 cpu_x86_load_seg_cache(env, seg, selector,
292 (selector << 4), 0xffff, 0);
293#endif /* VBOX */
294}
295
296static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
297 uint32_t *esp_ptr, int dpl)
298{
299#ifndef VBOX
300 int type, index, shift;
301#else
302 unsigned int type, index, shift;
303#endif
304
305#if 0
306 {
307 int i;
308 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
309 for(i=0;i<env->tr.limit;i++) {
310 printf("%02x ", env->tr.base[i]);
311 if ((i & 7) == 7) printf("\n");
312 }
313 printf("\n");
314 }
315#endif
316
317 if (!(env->tr.flags & DESC_P_MASK))
318 cpu_abort(env, "invalid tss");
319 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
320 if ((type & 7) != 1)
321 cpu_abort(env, "invalid tss type");
322 shift = type >> 3;
323 index = (dpl * 4 + 2) << shift;
324 if (index + (4 << shift) - 1 > env->tr.limit)
325 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
326 if (shift == 0) {
327 *esp_ptr = lduw_kernel(env->tr.base + index);
328 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
329 } else {
330 *esp_ptr = ldl_kernel(env->tr.base + index);
331 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
332 }
333}
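/* In a 32-bit TSS the privilege-level stacks live at offset 4 + 8*dpl
 * (ESPn followed by SSn), which is what ((dpl * 4 + 2) << shift) yields
 * for shift == 1; a 16-bit TSS packs SPn/SSn at offset 2 + 4*dpl. */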
334
335/* XXX: merge with load_seg() */
336static void tss_load_seg(int seg_reg, int selector)
337{
338 uint32_t e1, e2;
339 int rpl, dpl, cpl;
340
341#ifdef VBOX
342 e1 = e2 = 0; /* gcc warning? */
343 cpl = env->hflags & HF_CPL_MASK;
344 /* Trying to load a selector with RPL=1 while at CPL=0 (raw ring-0 mode)? */
345 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
346 {
347 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
348 selector = selector & 0xfffc;
349 }
350#endif /* VBOX */
351
352 if ((selector & 0xfffc) != 0) {
353 if (load_segment(&e1, &e2, selector) != 0)
354 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
355 if (!(e2 & DESC_S_MASK))
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 rpl = selector & 3;
358 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
359 cpl = env->hflags & HF_CPL_MASK;
360 if (seg_reg == R_CS) {
361 if (!(e2 & DESC_CS_MASK))
362 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
363 /* XXX: is it correct ? */
364 if (dpl != rpl)
365 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
366 if ((e2 & DESC_C_MASK) && dpl > rpl)
367 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
368 } else if (seg_reg == R_SS) {
369 /* SS must be writable data */
370 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
371 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
372 if (dpl != cpl || dpl != rpl)
373 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
374 } else {
375 /* not readable code */
376 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
377 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
378 /* if data or non-conforming code, check the access rights */
379 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
380 if (dpl < cpl || dpl < rpl)
381 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
382 }
383 }
384 if (!(e2 & DESC_P_MASK))
385 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
386 cpu_x86_load_seg_cache(env, seg_reg, selector,
387 get_seg_base(e1, e2),
388 get_seg_limit(e1, e2),
389 e2);
390 } else {
391 if (seg_reg == R_SS || seg_reg == R_CS)
392 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
393#ifdef VBOX
394# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
395 cpu_x86_load_seg_cache(env, seg_reg, selector,
396 0, 0, 0);
397# endif
398#endif /* VBOX */
399 }
400}
401
402#define SWITCH_TSS_JMP 0
403#define SWITCH_TSS_IRET 1
404#define SWITCH_TSS_CALL 2
405
406/* XXX: restore CPU state in registers (PowerPC case) */
407static void switch_tss(int tss_selector,
408 uint32_t e1, uint32_t e2, int source,
409 uint32_t next_eip)
410{
411 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
412 target_ulong tss_base;
413 uint32_t new_regs[8], new_segs[6];
414 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
415 uint32_t old_eflags, eflags_mask;
416 SegmentCache *dt;
417#ifndef VBOX
418 int index;
419#else
420 unsigned int index;
421#endif
422 target_ulong ptr;
423
424 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
425 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
426
427 /* if task gate, we read the TSS segment and we load it */
428 if (type == 5) {
429 if (!(e2 & DESC_P_MASK))
430 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
431 tss_selector = e1 >> 16;
432 if (tss_selector & 4)
433 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
434 if (load_segment(&e1, &e2, tss_selector) != 0)
435 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
436 if (e2 & DESC_S_MASK)
437 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
438 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
439 if ((type & 7) != 1)
440 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
441 }
442
443 if (!(e2 & DESC_P_MASK))
444 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
445
446 if (type & 8)
447 tss_limit_max = 103;
448 else
449 tss_limit_max = 43;
450 tss_limit = get_seg_limit(e1, e2);
451 tss_base = get_seg_base(e1, e2);
452 if ((tss_selector & 4) != 0 ||
453 tss_limit < tss_limit_max)
454 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
455 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
456 if (old_type & 8)
457 old_tss_limit_max = 103;
458 else
459 old_tss_limit_max = 43;
460
461 /* read all the registers from the new TSS */
462 if (type & 8) {
463 /* 32 bit */
464 new_cr3 = ldl_kernel(tss_base + 0x1c);
465 new_eip = ldl_kernel(tss_base + 0x20);
466 new_eflags = ldl_kernel(tss_base + 0x24);
467 for(i = 0; i < 8; i++)
468 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
469 for(i = 0; i < 6; i++)
470 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
471 new_ldt = lduw_kernel(tss_base + 0x60);
472 new_trap = ldl_kernel(tss_base + 0x64);
473 } else {
474 /* 16 bit */
475 new_cr3 = 0;
476 new_eip = lduw_kernel(tss_base + 0x0e);
477 new_eflags = lduw_kernel(tss_base + 0x10);
478 for(i = 0; i < 8; i++)
479 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
480 for(i = 0; i < 4; i++)
481 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
482 new_ldt = lduw_kernel(tss_base + 0x2a);
483 new_segs[R_FS] = 0;
484 new_segs[R_GS] = 0;
485 new_trap = 0;
486 }
487
488 /* NOTE: we must avoid memory exceptions during the task switch,
489 so we make dummy accesses beforehand */
490 /* XXX: it can still fail in some cases, so a bigger hack is
491 necessary to validate the TLB after the accesses have been done */
492
493 v1 = ldub_kernel(env->tr.base);
494 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
495 stb_kernel(env->tr.base, v1);
496 stb_kernel(env->tr.base + old_tss_limit_max, v2);
497
498 /* clear busy bit (it is restartable) */
499 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
500 target_ulong ptr;
501 uint32_t e2;
502 ptr = env->gdt.base + (env->tr.selector & ~7);
503 e2 = ldl_kernel(ptr + 4);
504 e2 &= ~DESC_TSS_BUSY_MASK;
505 stl_kernel(ptr + 4, e2);
506 }
507 old_eflags = compute_eflags();
508 if (source == SWITCH_TSS_IRET)
509 old_eflags &= ~NT_MASK;
510
511 /* save the current state in the old TSS */
512 if (type & 8) {
513 /* 32 bit */
514 stl_kernel(env->tr.base + 0x20, next_eip);
515 stl_kernel(env->tr.base + 0x24, old_eflags);
516 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
517 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
518 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
519 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
520 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
521 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
522 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
523 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
524 for(i = 0; i < 6; i++)
525 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
526#ifdef VBOX
527 /* Must store the ldt as it gets reloaded and might have been changed. */
528 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
529#endif
530#if defined(VBOX) && defined(DEBUG)
531 printf("TSS 32 bits switch\n");
532 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
533#endif
534 } else {
535 /* 16 bit */
536 stw_kernel(env->tr.base + 0x0e, next_eip);
537 stw_kernel(env->tr.base + 0x10, old_eflags);
538 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
539 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
540 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
541 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
542 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
543 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
544 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
545 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
546 for(i = 0; i < 4; i++)
547 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
548#ifdef VBOX
549 /* Must store the ldt as it gets reloaded and might have been changed. */
550 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
551#endif
552 }
553
554 /* now if an exception occurs, it will occur in the next task
555 context */
556
557 if (source == SWITCH_TSS_CALL) {
558 stw_kernel(tss_base, env->tr.selector);
559 new_eflags |= NT_MASK;
560 }
561
562 /* set busy bit */
563 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
564 target_ulong ptr;
565 uint32_t e2;
566 ptr = env->gdt.base + (tss_selector & ~7);
567 e2 = ldl_kernel(ptr + 4);
568 e2 |= DESC_TSS_BUSY_MASK;
569 stl_kernel(ptr + 4, e2);
570 }
571
572 /* set the new CPU state */
573 /* from this point, any exception which occurs can give problems */
574 env->cr[0] |= CR0_TS_MASK;
575 env->hflags |= HF_TS_MASK;
576 env->tr.selector = tss_selector;
577 env->tr.base = tss_base;
578 env->tr.limit = tss_limit;
579 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
580#ifdef VBOX
581 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
582 env->tr.newselector = 0;
583#endif
584
585 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
586 cpu_x86_update_cr3(env, new_cr3);
587 }
588
589 /* load all registers without risking an exception, then reload the
590 ones that can raise exceptions */
591 env->eip = new_eip;
592 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
593 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
594 if (!(type & 8))
595 eflags_mask &= 0xffff;
596 load_eflags(new_eflags, eflags_mask);
597 /* XXX: what to do in 16 bit case ? */
598 EAX = new_regs[0];
599 ECX = new_regs[1];
600 EDX = new_regs[2];
601 EBX = new_regs[3];
602 ESP = new_regs[4];
603 EBP = new_regs[5];
604 ESI = new_regs[6];
605 EDI = new_regs[7];
606 if (new_eflags & VM_MASK) {
607 for(i = 0; i < 6; i++)
608 load_seg_vm(i, new_segs[i]);
609 /* in vm86, CPL is always 3 */
610 cpu_x86_set_cpl(env, 3);
611 } else {
612 /* CPL is set to the RPL of CS */
613 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
614 /* first just selectors as the rest may trigger exceptions */
615 for(i = 0; i < 6; i++)
616 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
617 }
618
619 env->ldt.selector = new_ldt & ~4;
620 env->ldt.base = 0;
621 env->ldt.limit = 0;
622 env->ldt.flags = 0;
623#ifdef VBOX
624 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
625 env->ldt.newselector = 0;
626#endif
627
628 /* load the LDT */
629 if (new_ldt & 4)
630 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
631
632 if ((new_ldt & 0xfffc) != 0) {
633 dt = &env->gdt;
634 index = new_ldt & ~7;
635 if ((index + 7) > dt->limit)
636 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
637 ptr = dt->base + index;
638 e1 = ldl_kernel(ptr);
639 e2 = ldl_kernel(ptr + 4);
640 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
641 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
642 if (!(e2 & DESC_P_MASK))
643 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
644 load_seg_cache_raw_dt(&env->ldt, e1, e2);
645 }
646
647 /* load the segments */
648 if (!(new_eflags & VM_MASK)) {
649 tss_load_seg(R_CS, new_segs[R_CS]);
650 tss_load_seg(R_SS, new_segs[R_SS]);
651 tss_load_seg(R_ES, new_segs[R_ES]);
652 tss_load_seg(R_DS, new_segs[R_DS]);
653 tss_load_seg(R_FS, new_segs[R_FS]);
654 tss_load_seg(R_GS, new_segs[R_GS]);
655 }
656
657 /* check that EIP is in the CS segment limits */
658 if (new_eip > env->segs[R_CS].limit) {
659 /* XXX: different exception if CALL ? */
660 raise_exception_err(EXCP0D_GPF, 0);
661 }
662
663#ifndef CONFIG_USER_ONLY
664 /* reset local breakpoints */
665 if (env->dr[7] & 0x55) {
666 for (i = 0; i < 4; i++) {
667 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
668 hw_breakpoint_remove(env, i);
669 }
670 env->dr[7] &= ~0x55;
671 }
672#endif
673}
674
675/* check if port I/O is allowed by the TSS I/O bitmap */
676static inline void check_io(int addr, int size)
677{
678#ifndef VBOX
679 int io_offset, val, mask;
680#else
681 int val, mask;
682 unsigned int io_offset;
683#endif /* VBOX */
684
685 /* TSS must be a valid 32 bit one */
686 if (!(env->tr.flags & DESC_P_MASK) ||
687 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
688 env->tr.limit < 103)
689 goto fail;
690 io_offset = lduw_kernel(env->tr.base + 0x66);
691 io_offset += (addr >> 3);
692 /* Note: the check needs two bytes */
693 if ((io_offset + 1) > env->tr.limit)
694 goto fail;
695 val = lduw_kernel(env->tr.base + io_offset);
696 val >>= (addr & 7);
697 mask = (1 << size) - 1;
698 /* all bits must be zero to allow the I/O */
699 if ((val & mask) != 0) {
700 fail:
701 raise_exception_err(EXCP0D_GPF, 0);
702 }
703}
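/* Worked example (illustrative only, hypothetical port number): a 2-byte
 * access to port 0x3fa reads the 16-bit word at
 *     tr.base + io_offset + (0x3fa >> 3)  =  tr.base + io_offset + 0x7f,
 * shifts it right by (0x3fa & 7) = 2 and requires the low
 * (1 << 2) - 1 = 3 bits to be clear, i.e. bitmap bits 2 and 3 of byte 0x7f
 * must both be 0.  The word-sized read (and the io_offset + 1 limit check)
 * is needed because the checked span may straddle a byte boundary. */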
704
705#ifdef VBOX
706
707/* Keep in sync with gen_check_external_event() */
708void helper_check_external_event()
709{
710 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
711 | CPU_INTERRUPT_EXTERNAL_EXIT
712 | CPU_INTERRUPT_EXTERNAL_TIMER
713 | CPU_INTERRUPT_EXTERNAL_DMA))
714 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
715 && (env->eflags & IF_MASK)
716 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
717 {
718 helper_external_event();
719 }
720
721}
722
723void helper_sync_seg(uint32_t reg)
724{
725 if (env->segs[reg].newselector)
726 sync_seg(env, reg, env->segs[reg].newselector);
727}
728
729#endif /* VBOX */
730
731void helper_check_iob(uint32_t t0)
732{
733 check_io(t0, 1);
734}
735
736void helper_check_iow(uint32_t t0)
737{
738 check_io(t0, 2);
739}
740
741void helper_check_iol(uint32_t t0)
742{
743 check_io(t0, 4);
744}
745
746void helper_outb(uint32_t port, uint32_t data)
747{
748#ifndef VBOX
749 cpu_outb(port, data & 0xff);
750#else
751 cpu_outb(env, port, data & 0xff);
752#endif
753}
754
755target_ulong helper_inb(uint32_t port)
756{
757#ifndef VBOX
758 return cpu_inb(port);
759#else
760 return cpu_inb(env, port);
761#endif
762}
763
764void helper_outw(uint32_t port, uint32_t data)
765{
766#ifndef VBOX
767 cpu_outw(port, data & 0xffff);
768#else
769 cpu_outw(env, port, data & 0xffff);
770#endif
771}
772
773target_ulong helper_inw(uint32_t port)
774{
775#ifndef VBOX
776 return cpu_inw(port);
777#else
778 return cpu_inw(env, port);
779#endif
780}
781
782void helper_outl(uint32_t port, uint32_t data)
783{
784#ifndef VBOX
785 cpu_outl(port, data);
786#else
787 cpu_outl(env, port, data);
788#endif
789}
790
791target_ulong helper_inl(uint32_t port)
792{
793#ifndef VBOX
794 return cpu_inl(port);
795#else
796 return cpu_inl(env, port);
797#endif
798}
799
800static inline unsigned int get_sp_mask(unsigned int e2)
801{
802 if (e2 & DESC_B_MASK)
803 return 0xffffffff;
804 else
805 return 0xffff;
806}
807
808static int exeption_has_error_code(int intno)
809{
810 switch(intno) {
811 case 8:
812 case 10:
813 case 11:
814 case 12:
815 case 13:
816 case 14:
817 case 17:
818 return 1;
819 }
820 return 0;
821}
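/* These are #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14) and
 * #AC(17), the only exceptions for which the CPU pushes an error code. */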
822
823#ifdef TARGET_X86_64
824#define SET_ESP(val, sp_mask)\
825do {\
826 if ((sp_mask) == 0xffff)\
827 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
828 else if ((sp_mask) == 0xffffffffLL)\
829 ESP = (uint32_t)(val);\
830 else\
831 ESP = (val);\
832} while (0)
833#else
834#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
835#endif
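/* SET_ESP only updates the bits selected by sp_mask: with a 16-bit stack
 * (SS.B clear, sp_mask 0xffff) only SP changes and the upper ESP bits are
 * preserved, as on real hardware; the 0xffffffff case zero-extends the
 * value into RSP on 64-bit targets. */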
836
837/* on 64-bit machines this can overflow, so this segment addition macro
838 * can be used to trim the value to 32 bits whenever needed */
839#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
840
841/* XXX: add a is_user flag to have proper security support */
842#define PUSHW(ssp, sp, sp_mask, val)\
843{\
844 sp -= 2;\
845 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
846}
847
848#define PUSHL(ssp, sp, sp_mask, val)\
849{\
850 sp -= 4;\
851 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
852}
853
854#define POPW(ssp, sp, sp_mask, val)\
855{\
856 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
857 sp += 2;\
858}
859
860#define POPL(ssp, sp, sp_mask, val)\
861{\
862 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
863 sp += 4;\
864}
865
866/* protected mode interrupt */
867static void do_interrupt_protected(int intno, int is_int, int error_code,
868 unsigned int next_eip, int is_hw)
869{
870 SegmentCache *dt;
871 target_ulong ptr, ssp;
872 int type, dpl, selector, ss_dpl, cpl;
873 int has_error_code, new_stack, shift;
874 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
875 uint32_t old_eip, sp_mask;
876
877#ifdef VBOX
878 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
879 cpu_loop_exit();
880#endif
881
882 has_error_code = 0;
883 if (!is_int && !is_hw)
884 has_error_code = exeption_has_error_code(intno);
885 if (is_int)
886 old_eip = next_eip;
887 else
888 old_eip = env->eip;
889
890 dt = &env->idt;
891#ifndef VBOX
892 if (intno * 8 + 7 > dt->limit)
893#else
894 if ((unsigned)intno * 8 + 7 > dt->limit)
895#endif
896 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
897 ptr = dt->base + intno * 8;
898 e1 = ldl_kernel(ptr);
899 e2 = ldl_kernel(ptr + 4);
900 /* check gate type */
901 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
902 switch(type) {
903 case 5: /* task gate */
904 /* must do that check here to return the correct error code */
905 if (!(e2 & DESC_P_MASK))
906 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
907 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
908 if (has_error_code) {
909 int type;
910 uint32_t mask;
911 /* push the error code */
912 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
913 shift = type >> 3;
914 if (env->segs[R_SS].flags & DESC_B_MASK)
915 mask = 0xffffffff;
916 else
917 mask = 0xffff;
918 esp = (ESP - (2 << shift)) & mask;
919 ssp = env->segs[R_SS].base + esp;
920 if (shift)
921 stl_kernel(ssp, error_code);
922 else
923 stw_kernel(ssp, error_code);
924 SET_ESP(esp, mask);
925 }
926 return;
927 case 6: /* 286 interrupt gate */
928 case 7: /* 286 trap gate */
929 case 14: /* 386 interrupt gate */
930 case 15: /* 386 trap gate */
931 break;
932 default:
933 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
934 break;
935 }
936 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
937 cpl = env->hflags & HF_CPL_MASK;
938 /* check privilege if software int */
939 if (is_int && dpl < cpl)
940 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
941 /* check valid bit */
942 if (!(e2 & DESC_P_MASK))
943 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
944 selector = e1 >> 16;
945 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
946 if ((selector & 0xfffc) == 0)
947 raise_exception_err(EXCP0D_GPF, 0);
948
949 if (load_segment(&e1, &e2, selector) != 0)
950 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
951 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
952 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
953 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
954 if (dpl > cpl)
955 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
956 if (!(e2 & DESC_P_MASK))
957 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
958 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
959 /* to inner privilege */
960 get_ss_esp_from_tss(&ss, &esp, dpl);
961 if ((ss & 0xfffc) == 0)
962 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
963 if ((ss & 3) != dpl)
964 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
965 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
966 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
967 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
968 if (ss_dpl != dpl)
969 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
970 if (!(ss_e2 & DESC_S_MASK) ||
971 (ss_e2 & DESC_CS_MASK) ||
972 !(ss_e2 & DESC_W_MASK))
973 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
974 if (!(ss_e2 & DESC_P_MASK))
975#ifdef VBOX /* See page 3-477 of 253666.pdf */
976 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
977#else
978 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
979#endif
980 new_stack = 1;
981 sp_mask = get_sp_mask(ss_e2);
982 ssp = get_seg_base(ss_e1, ss_e2);
983#if defined(VBOX) && defined(DEBUG)
984 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
985#endif
986 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
987 /* to same privilege */
988 if (env->eflags & VM_MASK)
989 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
990 new_stack = 0;
991 sp_mask = get_sp_mask(env->segs[R_SS].flags);
992 ssp = env->segs[R_SS].base;
993 esp = ESP;
994 dpl = cpl;
995 } else {
996 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
997 new_stack = 0; /* avoid warning */
998 sp_mask = 0; /* avoid warning */
999 ssp = 0; /* avoid warning */
1000 esp = 0; /* avoid warning */
1001 }
1002
1003 shift = type >> 3;
1004
1005#if 0
1006 /* XXX: check that enough room is available */
1007 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1008 if (env->eflags & VM_MASK)
1009 push_size += 8;
1010 push_size <<= shift;
1011#endif
1012 if (shift == 1) {
1013 if (new_stack) {
1014 if (env->eflags & VM_MASK) {
1015 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1016 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1017 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1018 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1019 }
1020 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1021 PUSHL(ssp, esp, sp_mask, ESP);
1022 }
1023 PUSHL(ssp, esp, sp_mask, compute_eflags());
1024 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1025 PUSHL(ssp, esp, sp_mask, old_eip);
1026 if (has_error_code) {
1027 PUSHL(ssp, esp, sp_mask, error_code);
1028 }
1029 } else {
1030 if (new_stack) {
1031 if (env->eflags & VM_MASK) {
1032 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1033 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1034 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1035 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1036 }
1037 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1038 PUSHW(ssp, esp, sp_mask, ESP);
1039 }
1040 PUSHW(ssp, esp, sp_mask, compute_eflags());
1041 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1042 PUSHW(ssp, esp, sp_mask, old_eip);
1043 if (has_error_code) {
1044 PUSHW(ssp, esp, sp_mask, error_code);
1045 }
1046 }
1047
1048 if (new_stack) {
1049 if (env->eflags & VM_MASK) {
1050 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1051 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1052 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1053 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1054 }
1055 ss = (ss & ~3) | dpl;
1056 cpu_x86_load_seg_cache(env, R_SS, ss,
1057 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1058 }
1059 SET_ESP(esp, sp_mask);
1060
1061 selector = (selector & ~3) | dpl;
1062 cpu_x86_load_seg_cache(env, R_CS, selector,
1063 get_seg_base(e1, e2),
1064 get_seg_limit(e1, e2),
1065 e2);
1066 cpu_x86_set_cpl(env, dpl);
1067 env->eip = offset;
1068
1069 /* interrupt gate clear IF mask */
1070 if ((type & 1) == 0) {
1071 env->eflags &= ~IF_MASK;
1072 }
1073#ifndef VBOX
1074 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1075#else
1076 /*
1077 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1078 * gets confused by seemingly changed EFLAGS. See #3491 and
1079 * public bug #2341.
1080 */
1081 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1082#endif
1083}
1084
1085#ifdef VBOX
1086
1087/* check if VME interrupt redirection is enabled in TSS */
1088DECLINLINE(bool) is_vme_irq_redirected(int intno)
1089{
1090 unsigned int io_offset, intredir_offset;
1091 unsigned char val, mask;
1092
1093 /* TSS must be a valid 32 bit one */
1094 if (!(env->tr.flags & DESC_P_MASK) ||
1095 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1096 env->tr.limit < 103)
1097 goto fail;
1098 io_offset = lduw_kernel(env->tr.base + 0x66);
1099 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1100 if (io_offset < 0x68 + 0x20)
1101 io_offset = 0x68 + 0x20;
1102 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1103 intredir_offset = io_offset - 0x20;
1104
1105 intredir_offset += (intno >> 3);
1106 if ((intredir_offset) > env->tr.limit)
1107 goto fail;
1108
1109 val = ldub_kernel(env->tr.base + intredir_offset);
1110 mask = 1 << (unsigned char)(intno & 7);
1111
1112 /* bit set means no redirection. */
1113 if ((val & mask) != 0) {
1114 return false;
1115 }
1116 return true;
1117
1118fail:
1119 raise_exception_err(EXCP0D_GPF, 0);
1120 return true;
1121}
1122
1123/* V86 mode software interrupt with CR4.VME=1 */
1124static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1125{
1126 target_ulong ptr, ssp;
1127 int selector;
1128 uint32_t offset, esp;
1129 uint32_t old_cs, old_eflags;
1130 uint32_t iopl;
1131
1132 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1133
1134 if (!is_vme_irq_redirected(intno))
1135 {
1136 if (iopl == 3)
1137 {
1138 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1139 return;
1140 }
1141 else
1142 raise_exception_err(EXCP0D_GPF, 0);
1143 }
1144
1145 /* virtual mode idt is at linear address 0 */
1146 ptr = 0 + intno * 4;
1147 offset = lduw_kernel(ptr);
1148 selector = lduw_kernel(ptr + 2);
1149 esp = ESP;
1150 ssp = env->segs[R_SS].base;
1151 old_cs = env->segs[R_CS].selector;
1152
1153 old_eflags = compute_eflags();
1154 if (iopl < 3)
1155 {
1156 /* copy VIF into IF and set IOPL to 3 */
1157 if (env->eflags & VIF_MASK)
1158 old_eflags |= IF_MASK;
1159 else
1160 old_eflags &= ~IF_MASK;
1161
1162 old_eflags |= (3 << IOPL_SHIFT);
1163 }
1164
1165 /* XXX: use SS segment size ? */
1166 PUSHW(ssp, esp, 0xffff, old_eflags);
1167 PUSHW(ssp, esp, 0xffff, old_cs);
1168 PUSHW(ssp, esp, 0xffff, next_eip);
1169
1170 /* update processor state */
1171 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1172 env->eip = offset;
1173 env->segs[R_CS].selector = selector;
1174 env->segs[R_CS].base = (selector << 4);
1175 env->eflags &= ~(TF_MASK | RF_MASK);
1176
1177 if (iopl < 3)
1178 env->eflags &= ~VIF_MASK;
1179 else
1180 env->eflags &= ~IF_MASK;
1181}
1182
1183#endif /* VBOX */
1184
1185#ifdef TARGET_X86_64
1186
1187#define PUSHQ(sp, val)\
1188{\
1189 sp -= 8;\
1190 stq_kernel(sp, (val));\
1191}
1192
1193#define POPQ(sp, val)\
1194{\
1195 val = ldq_kernel(sp);\
1196 sp += 8;\
1197}
1198
1199static inline target_ulong get_rsp_from_tss(int level)
1200{
1201 int index;
1202
1203#if 0
1204 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1205 env->tr.base, env->tr.limit);
1206#endif
1207
1208 if (!(env->tr.flags & DESC_P_MASK))
1209 cpu_abort(env, "invalid tss");
1210 index = 8 * level + 4;
1211 if ((index + 7) > env->tr.limit)
1212 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1213 return ldq_kernel(env->tr.base + index);
1214}
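/* 64-bit TSS layout: RSP0..RSP2 are at offsets 4, 12 and 20 (8*level + 4)
 * and IST1..IST7 follow at offsets 36..84, which is why callers pass
 * (ist + 3) as the level for IST entries. */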
1215
1216/* 64 bit interrupt */
1217static void do_interrupt64(int intno, int is_int, int error_code,
1218 target_ulong next_eip, int is_hw)
1219{
1220 SegmentCache *dt;
1221 target_ulong ptr;
1222 int type, dpl, selector, cpl, ist;
1223 int has_error_code, new_stack;
1224 uint32_t e1, e2, e3, ss;
1225 target_ulong old_eip, esp, offset;
1226
1227#ifdef VBOX
1228 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1229 cpu_loop_exit();
1230#endif
1231
1232 has_error_code = 0;
1233 if (!is_int && !is_hw)
1234 has_error_code = exeption_has_error_code(intno);
1235 if (is_int)
1236 old_eip = next_eip;
1237 else
1238 old_eip = env->eip;
1239
1240 dt = &env->idt;
1241 if (intno * 16 + 15 > dt->limit)
1242 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1243 ptr = dt->base + intno * 16;
1244 e1 = ldl_kernel(ptr);
1245 e2 = ldl_kernel(ptr + 4);
1246 e3 = ldl_kernel(ptr + 8);
1247 /* check gate type */
1248 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1249 switch(type) {
1250 case 14: /* 386 interrupt gate */
1251 case 15: /* 386 trap gate */
1252 break;
1253 default:
1254 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1255 break;
1256 }
1257 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1258 cpl = env->hflags & HF_CPL_MASK;
1259 /* check privilege if software int */
1260 if (is_int && dpl < cpl)
1261 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1262 /* check valid bit */
1263 if (!(e2 & DESC_P_MASK))
1264 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1265 selector = e1 >> 16;
1266 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1267 ist = e2 & 7;
1268 if ((selector & 0xfffc) == 0)
1269 raise_exception_err(EXCP0D_GPF, 0);
1270
1271 if (load_segment(&e1, &e2, selector) != 0)
1272 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1273 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1274 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1275 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1276 if (dpl > cpl)
1277 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1278 if (!(e2 & DESC_P_MASK))
1279 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1280 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1281 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1282 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1283 /* to inner privilege */
1284 if (ist != 0)
1285 esp = get_rsp_from_tss(ist + 3);
1286 else
1287 esp = get_rsp_from_tss(dpl);
1288 esp &= ~0xfLL; /* align stack */
1289 ss = 0;
1290 new_stack = 1;
1291 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1292 /* to same privilege */
1293 if (env->eflags & VM_MASK)
1294 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1295 new_stack = 0;
1296 if (ist != 0)
1297 esp = get_rsp_from_tss(ist + 3);
1298 else
1299 esp = ESP;
1300 esp &= ~0xfLL; /* align stack */
1301 dpl = cpl;
1302 } else {
1303 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1304 new_stack = 0; /* avoid warning */
1305 esp = 0; /* avoid warning */
1306 }
1307
1308 PUSHQ(esp, env->segs[R_SS].selector);
1309 PUSHQ(esp, ESP);
1310 PUSHQ(esp, compute_eflags());
1311 PUSHQ(esp, env->segs[R_CS].selector);
1312 PUSHQ(esp, old_eip);
1313 if (has_error_code) {
1314 PUSHQ(esp, error_code);
1315 }
1316
1317 if (new_stack) {
1318 ss = 0 | dpl;
1319 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1320 }
1321 ESP = esp;
1322
1323 selector = (selector & ~3) | dpl;
1324 cpu_x86_load_seg_cache(env, R_CS, selector,
1325 get_seg_base(e1, e2),
1326 get_seg_limit(e1, e2),
1327 e2);
1328 cpu_x86_set_cpl(env, dpl);
1329 env->eip = offset;
1330
1331 /* interrupt gate clear IF mask */
1332 if ((type & 1) == 0) {
1333 env->eflags &= ~IF_MASK;
1334 }
1335#ifndef VBOX
1336 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1337#else /* VBOX */
1338 /*
1339 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1340 * gets confused by seemingly changed EFLAGS. See #3491 and
1341 * public bug #2341.
1342 */
1343 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1344#endif /* VBOX */
1345}
1346#endif
1347
1348#ifdef TARGET_X86_64
1349#if defined(CONFIG_USER_ONLY)
1350void helper_syscall(int next_eip_addend)
1351{
1352 env->exception_index = EXCP_SYSCALL;
1353 env->exception_next_eip = env->eip + next_eip_addend;
1354 cpu_loop_exit();
1355}
1356#else
1357void helper_syscall(int next_eip_addend)
1358{
1359 int selector;
1360
1361 if (!(env->efer & MSR_EFER_SCE)) {
1362 raise_exception_err(EXCP06_ILLOP, 0);
1363 }
1364 selector = (env->star >> 32) & 0xffff;
1365 if (env->hflags & HF_LMA_MASK) {
1366 int code64;
1367
1368 ECX = env->eip + next_eip_addend;
1369 env->regs[11] = compute_eflags();
1370
1371 code64 = env->hflags & HF_CS64_MASK;
1372
1373 cpu_x86_set_cpl(env, 0);
1374 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1375 0, 0xffffffff,
1376 DESC_G_MASK | DESC_P_MASK |
1377 DESC_S_MASK |
1378 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1379 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1380 0, 0xffffffff,
1381 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1382 DESC_S_MASK |
1383 DESC_W_MASK | DESC_A_MASK);
1384 env->eflags &= ~env->fmask;
1385 load_eflags(env->eflags, 0);
1386 if (code64)
1387 env->eip = env->lstar;
1388 else
1389 env->eip = env->cstar;
1390 } else {
1391 ECX = (uint32_t)(env->eip + next_eip_addend);
1392
1393 cpu_x86_set_cpl(env, 0);
1394 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1395 0, 0xffffffff,
1396 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1397 DESC_S_MASK |
1398 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1399 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1400 0, 0xffffffff,
1401 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1402 DESC_S_MASK |
1403 DESC_W_MASK | DESC_A_MASK);
1404 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1405 env->eip = (uint32_t)env->star;
1406 }
1407}
1408#endif
1409#endif
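/* SYSCALL summary: the target CS selector comes from STAR[47:32] and SS is
 * that selector + 8; in long mode RCX receives the return RIP, R11 the
 * current RFLAGS, RFLAGS is masked with SFMASK and RIP is loaded from
 * LSTAR (or CSTAR for a compatibility-mode caller); outside long mode the
 * CPU jumps to the EIP held in STAR[31:0] with IF/RF/VM cleared. */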
1410
1411#ifdef TARGET_X86_64
1412void helper_sysret(int dflag)
1413{
1414 int cpl, selector;
1415
1416 if (!(env->efer & MSR_EFER_SCE)) {
1417 raise_exception_err(EXCP06_ILLOP, 0);
1418 }
1419 cpl = env->hflags & HF_CPL_MASK;
1420 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1421 raise_exception_err(EXCP0D_GPF, 0);
1422 }
1423 selector = (env->star >> 48) & 0xffff;
1424 if (env->hflags & HF_LMA_MASK) {
1425 if (dflag == 2) {
1426 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1427 0, 0xffffffff,
1428 DESC_G_MASK | DESC_P_MASK |
1429 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1430 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1431 DESC_L_MASK);
1432 env->eip = ECX;
1433 } else {
1434 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1435 0, 0xffffffff,
1436 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1437 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1438 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1439 env->eip = (uint32_t)ECX;
1440 }
1441 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1442 0, 0xffffffff,
1443 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1444 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1445 DESC_W_MASK | DESC_A_MASK);
1446 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1447 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1448 cpu_x86_set_cpl(env, 3);
1449 } else {
1450 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1451 0, 0xffffffff,
1452 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1453 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1454 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1455 env->eip = (uint32_t)ECX;
1456 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1457 0, 0xffffffff,
1458 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1459 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1460 DESC_W_MASK | DESC_A_MASK);
1461 env->eflags |= IF_MASK;
1462 cpu_x86_set_cpl(env, 3);
1463 }
1464}
1465#endif
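/* SYSRET summary: the user CS selector comes from STAR[63:48] (plus 16 and
 * the L attribute for a 64-bit return), SS is STAR[63:48] + 8, RFLAGS is
 * restored from R11 in long mode, and CPL is forced to 3. */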
1466
1467#ifdef VBOX
1468
1469/**
1470 * Checks and processes external VMM events.
1471 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1472 */
1473void helper_external_event(void)
1474{
1475# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1476 uintptr_t uSP;
1477# ifdef RT_ARCH_AMD64
1478 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1479# else
1480 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1481# endif
1482 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1483# endif
1484 /* Keep in sync with flags checked by gen_check_external_event() */
1485 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1486 {
1487 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1488 ~CPU_INTERRUPT_EXTERNAL_HARD);
1489 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1490 }
1491 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1492 {
1493 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1494 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1495 cpu_exit(env);
1496 }
1497 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1498 {
1499 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1500 ~CPU_INTERRUPT_EXTERNAL_DMA);
1501 remR3DmaRun(env);
1502 }
1503 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1504 {
1505 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1506 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1507 remR3TimersRun(env);
1508 }
1509 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1510 {
1511 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1512 ~CPU_INTERRUPT_EXTERNAL_HARD);
1513 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1514 }
1515}
1516
1517/* helper for recording call instruction addresses for later scanning */
1518void helper_record_call()
1519{
1520 if ( !(env->state & CPU_RAW_RING0)
1521 && (env->cr[0] & CR0_PG_MASK)
1522 && !(env->eflags & X86_EFL_IF))
1523 remR3RecordCall(env);
1524}
1525
1526#endif /* VBOX */
1527
1528/* real mode interrupt */
1529static void do_interrupt_real(int intno, int is_int, int error_code,
1530 unsigned int next_eip)
1531{
1532 SegmentCache *dt;
1533 target_ulong ptr, ssp;
1534 int selector;
1535 uint32_t offset, esp;
1536 uint32_t old_cs, old_eip;
1537
1538 /* real mode (simpler !) */
1539 dt = &env->idt;
1540#ifndef VBOX
1541 if (intno * 4 + 3 > dt->limit)
1542#else
1543 if ((unsigned)intno * 4 + 3 > dt->limit)
1544#endif
1545 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1546 ptr = dt->base + intno * 4;
1547 offset = lduw_kernel(ptr);
1548 selector = lduw_kernel(ptr + 2);
1549 esp = ESP;
1550 ssp = env->segs[R_SS].base;
1551 if (is_int)
1552 old_eip = next_eip;
1553 else
1554 old_eip = env->eip;
1555 old_cs = env->segs[R_CS].selector;
1556 /* XXX: use SS segment size ? */
1557 PUSHW(ssp, esp, 0xffff, compute_eflags());
1558 PUSHW(ssp, esp, 0xffff, old_cs);
1559 PUSHW(ssp, esp, 0xffff, old_eip);
1560
1561 /* update processor state */
1562 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1563 env->eip = offset;
1564 env->segs[R_CS].selector = selector;
1565 env->segs[R_CS].base = (selector << 4);
1566 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1567}
1568
1569/* fake user mode interrupt */
1570void do_interrupt_user(int intno, int is_int, int error_code,
1571 target_ulong next_eip)
1572{
1573 SegmentCache *dt;
1574 target_ulong ptr;
1575 int dpl, cpl, shift;
1576 uint32_t e2;
1577
1578 dt = &env->idt;
1579 if (env->hflags & HF_LMA_MASK) {
1580 shift = 4;
1581 } else {
1582 shift = 3;
1583 }
1584 ptr = dt->base + (intno << shift);
1585 e2 = ldl_kernel(ptr + 4);
1586
1587 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1588 cpl = env->hflags & HF_CPL_MASK;
1589 /* check privilege if software int */
1590 if (is_int && dpl < cpl)
1591 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1592
1593 /* Since we only emulate user space, we cannot do more than
1594 exit the emulation with the appropriate exception and error
1595 code */
1596 if (is_int)
1597 EIP = next_eip;
1598}
1599
1600#if !defined(CONFIG_USER_ONLY)
1601static void handle_even_inj(int intno, int is_int, int error_code,
1602 int is_hw, int rm)
1603{
1604 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1605 if (!(event_inj & SVM_EVTINJ_VALID)) {
1606 int type;
1607 if (is_int)
1608 type = SVM_EVTINJ_TYPE_SOFT;
1609 else
1610 type = SVM_EVTINJ_TYPE_EXEPT;
1611 event_inj = intno | type | SVM_EVTINJ_VALID;
1612 if (!rm && exeption_has_error_code(intno)) {
1613 event_inj |= SVM_EVTINJ_VALID_ERR;
1614 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1615 }
1616 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1617 }
1618}
1619#endif
1620
1621/*
1622 * Begin execution of an interrupt or exception. is_int is TRUE if coming from
1623 * the int instruction. next_eip is the EIP value AFTER the interrupt
1624 * instruction. It is only relevant if is_int is TRUE.
1625 */
1626void do_interrupt(int intno, int is_int, int error_code,
1627 target_ulong next_eip, int is_hw)
1628{
1629 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1630 if ((env->cr[0] & CR0_PE_MASK)) {
1631 static int count;
1632 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1633 count, intno, error_code, is_int,
1634 env->hflags & HF_CPL_MASK,
1635 env->segs[R_CS].selector, EIP,
1636 (int)env->segs[R_CS].base + EIP,
1637 env->segs[R_SS].selector, ESP);
1638 if (intno == 0x0e) {
1639 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1640 } else {
1641 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1642 }
1643 qemu_log("\n");
1644 log_cpu_state(env, X86_DUMP_CCOP);
1645#if 0
1646 {
1647 int i;
1648 uint8_t *ptr;
1649 qemu_log(" code=");
1650 ptr = env->segs[R_CS].base + env->eip;
1651 for(i = 0; i < 16; i++) {
1652 qemu_log(" %02x", ldub(ptr + i));
1653 }
1654 qemu_log("\n");
1655 }
1656#endif
1657 count++;
1658 }
1659 }
1660#ifdef VBOX
1661 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1662 if (is_int) {
1663 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1664 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1665 } else {
1666 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1667 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1668 }
1669 }
1670#endif
1671 if (env->cr[0] & CR0_PE_MASK) {
1672#if !defined(CONFIG_USER_ONLY)
1673 if (env->hflags & HF_SVMI_MASK)
1674 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1675#endif
1676#ifdef TARGET_X86_64
1677 if (env->hflags & HF_LMA_MASK) {
1678 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1679 } else
1680#endif
1681 {
1682#ifdef VBOX
1683 /* software int xx in v86 code with VME enabled? */
1684 if ( (env->eflags & VM_MASK)
1685 && (env->cr[4] & CR4_VME_MASK)
1686 && is_int
1687 && !is_hw
1688 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1689 )
1690 do_soft_interrupt_vme(intno, error_code, next_eip);
1691 else
1692#endif /* VBOX */
1693 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1694 }
1695 } else {
1696#if !defined(CONFIG_USER_ONLY)
1697 if (env->hflags & HF_SVMI_MASK)
1698 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1699#endif
1700 do_interrupt_real(intno, is_int, error_code, next_eip);
1701 }
1702
1703#if !defined(CONFIG_USER_ONLY)
1704 if (env->hflags & HF_SVMI_MASK) {
1705 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1706 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1707 }
1708#endif
1709}
1710
1711/* This should come from sysemu.h - if we could include it here... */
1712void qemu_system_reset_request(void);
1713
1714/*
1715 * Check nested exceptions and change to double or triple fault if
1716 * needed. It should only be called if this is not an interrupt.
1717 * Returns the new exception number.
1718 */
1719static int check_exception(int intno, int *error_code)
1720{
1721 int first_contributory = env->old_exception == 0 ||
1722 (env->old_exception >= 10 &&
1723 env->old_exception <= 13);
1724 int second_contributory = intno == 0 ||
1725 (intno >= 10 && intno <= 13);
1726
1727 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1728 env->old_exception, intno);
1729
1730#if !defined(CONFIG_USER_ONLY)
1731 if (env->old_exception == EXCP08_DBLE) {
1732 if (env->hflags & HF_SVMI_MASK)
1733 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1734
1735 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1736
1737# ifndef VBOX
1738 qemu_system_reset_request();
1739# else
1740 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1741# endif
1742 return EXCP_HLT;
1743 }
1744#endif
1745
1746 if ((first_contributory && second_contributory)
1747 || (env->old_exception == EXCP0E_PAGE &&
1748 (second_contributory || (intno == EXCP0E_PAGE)))) {
1749 intno = EXCP08_DBLE;
1750 *error_code = 0;
1751 }
1752
1753 if (second_contributory || (intno == EXCP0E_PAGE) ||
1754 (intno == EXCP08_DBLE))
1755 env->old_exception = intno;
1756
1757 return intno;
1758}
1759
1760/*
1761 * Signal an interrupt or exception. It is executed in the main CPU loop.
1762 * is_int is TRUE if coming from the int instruction. next_eip is the
1763 * EIP value AFTER the interrupt instruction. It is only relevant if
1764 * is_int is TRUE.
1765 */
1766static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1767 int next_eip_addend)
1768{
1769#if defined(VBOX) && defined(DEBUG)
1770 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1771#endif
1772 if (!is_int) {
1773 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1774 intno = check_exception(intno, &error_code);
1775 } else {
1776 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1777 }
1778
1779 env->exception_index = intno;
1780 env->error_code = error_code;
1781 env->exception_is_int = is_int;
1782 env->exception_next_eip = env->eip + next_eip_addend;
1783 cpu_loop_exit();
1784}
1785
1786/* shortcuts to generate exceptions */
1787
1788void raise_exception_err(int exception_index, int error_code)
1789{
1790 raise_interrupt(exception_index, 0, error_code, 0);
1791}
1792
1793void raise_exception(int exception_index)
1794{
1795 raise_interrupt(exception_index, 0, 0, 0);
1796}
1797
1798void raise_exception_env(int exception_index, CPUState *nenv)
1799{
1800 env = nenv;
1801 raise_exception(exception_index);
1802}
1803/* SMM support */
1804
1805#if defined(CONFIG_USER_ONLY)
1806
1807void do_smm_enter(void)
1808{
1809}
1810
1811void helper_rsm(void)
1812{
1813}
1814
1815#else
1816
1817#ifdef TARGET_X86_64
1818#define SMM_REVISION_ID 0x00020064
1819#else
1820#define SMM_REVISION_ID 0x00020000
1821#endif
1822
1823void do_smm_enter(void)
1824{
1825 target_ulong sm_state;
1826 SegmentCache *dt;
1827 int i, offset;
1828
1829 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1830 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1831
1832 env->hflags |= HF_SMM_MASK;
1833 cpu_smm_update(env);
1834
1835 sm_state = env->smbase + 0x8000;
1836
1837#ifdef TARGET_X86_64
1838 for(i = 0; i < 6; i++) {
1839 dt = &env->segs[i];
1840 offset = 0x7e00 + i * 16;
1841 stw_phys(sm_state + offset, dt->selector);
1842 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1843 stl_phys(sm_state + offset + 4, dt->limit);
1844 stq_phys(sm_state + offset + 8, dt->base);
1845 }
1846
1847 stq_phys(sm_state + 0x7e68, env->gdt.base);
1848 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1849
1850 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1851 stq_phys(sm_state + 0x7e78, env->ldt.base);
1852 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1853 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1854
1855 stq_phys(sm_state + 0x7e88, env->idt.base);
1856 stl_phys(sm_state + 0x7e84, env->idt.limit);
1857
1858 stw_phys(sm_state + 0x7e90, env->tr.selector);
1859 stq_phys(sm_state + 0x7e98, env->tr.base);
1860 stl_phys(sm_state + 0x7e94, env->tr.limit);
1861 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1862
1863 stq_phys(sm_state + 0x7ed0, env->efer);
1864
1865 stq_phys(sm_state + 0x7ff8, EAX);
1866 stq_phys(sm_state + 0x7ff0, ECX);
1867 stq_phys(sm_state + 0x7fe8, EDX);
1868 stq_phys(sm_state + 0x7fe0, EBX);
1869 stq_phys(sm_state + 0x7fd8, ESP);
1870 stq_phys(sm_state + 0x7fd0, EBP);
1871 stq_phys(sm_state + 0x7fc8, ESI);
1872 stq_phys(sm_state + 0x7fc0, EDI);
1873 for(i = 8; i < 16; i++)
1874 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1875 stq_phys(sm_state + 0x7f78, env->eip);
1876 stl_phys(sm_state + 0x7f70, compute_eflags());
1877 stl_phys(sm_state + 0x7f68, env->dr[6]);
1878 stl_phys(sm_state + 0x7f60, env->dr[7]);
1879
1880 stl_phys(sm_state + 0x7f48, env->cr[4]);
1881 stl_phys(sm_state + 0x7f50, env->cr[3]);
1882 stl_phys(sm_state + 0x7f58, env->cr[0]);
1883
1884 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1885 stl_phys(sm_state + 0x7f00, env->smbase);
1886#else
1887 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1888 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1889 stl_phys(sm_state + 0x7ff4, compute_eflags());
1890 stl_phys(sm_state + 0x7ff0, env->eip);
1891 stl_phys(sm_state + 0x7fec, EDI);
1892 stl_phys(sm_state + 0x7fe8, ESI);
1893 stl_phys(sm_state + 0x7fe4, EBP);
1894 stl_phys(sm_state + 0x7fe0, ESP);
1895 stl_phys(sm_state + 0x7fdc, EBX);
1896 stl_phys(sm_state + 0x7fd8, EDX);
1897 stl_phys(sm_state + 0x7fd4, ECX);
1898 stl_phys(sm_state + 0x7fd0, EAX);
1899 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1900 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1901
1902 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1903 stl_phys(sm_state + 0x7f64, env->tr.base);
1904 stl_phys(sm_state + 0x7f60, env->tr.limit);
1905 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1906
1907 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1908 stl_phys(sm_state + 0x7f80, env->ldt.base);
1909 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1910 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1911
1912 stl_phys(sm_state + 0x7f74, env->gdt.base);
1913 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1914
1915 stl_phys(sm_state + 0x7f58, env->idt.base);
1916 stl_phys(sm_state + 0x7f54, env->idt.limit);
1917
1918 for(i = 0; i < 6; i++) {
1919 dt = &env->segs[i];
1920 if (i < 3)
1921 offset = 0x7f84 + i * 12;
1922 else
1923 offset = 0x7f2c + (i - 3) * 12;
1924 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1925 stl_phys(sm_state + offset + 8, dt->base);
1926 stl_phys(sm_state + offset + 4, dt->limit);
1927 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1928 }
1929 stl_phys(sm_state + 0x7f14, env->cr[4]);
1930
1931 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1932 stl_phys(sm_state + 0x7ef8, env->smbase);
1933#endif
1934 /* init SMM cpu state */
1935
1936#ifdef TARGET_X86_64
1937 cpu_load_efer(env, 0);
1938#endif
1939 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1940 env->eip = 0x00008000;
1941 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1942 0xffffffff, 0);
1943 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1944 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1945 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1946 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1947 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1948
1949 cpu_x86_update_cr0(env,
1950 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1951 cpu_x86_update_cr4(env, 0);
1952 env->dr[7] = 0x00000400;
1953 CC_OP = CC_OP_EFLAGS;
1954}
1955
1956void helper_rsm(void)
1957{
1958#ifdef VBOX
1959 cpu_abort(env, "helper_rsm");
1960#else /* !VBOX */
1961 target_ulong sm_state;
1962 int i, offset;
1963 uint32_t val;
1964
1965 sm_state = env->smbase + 0x8000;
1966#ifdef TARGET_X86_64
1967 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1968
1969 for(i = 0; i < 6; i++) {
1970 offset = 0x7e00 + i * 16;
1971 cpu_x86_load_seg_cache(env, i,
1972 lduw_phys(sm_state + offset),
1973 ldq_phys(sm_state + offset + 8),
1974 ldl_phys(sm_state + offset + 4),
1975 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1976 }
1977
1978 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1979 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1980
1981 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1982 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1983 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1984 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1985#ifdef VBOX
1986 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
1987 env->ldt.newselector = 0;
1988#endif
1989
1990 env->idt.base = ldq_phys(sm_state + 0x7e88);
1991 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1992
1993 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1994 env->tr.base = ldq_phys(sm_state + 0x7e98);
1995 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1996 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1997#ifdef VBOX
1998 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
1999 env->tr.newselector = 0;
2000#endif
2001
2002 EAX = ldq_phys(sm_state + 0x7ff8);
2003 ECX = ldq_phys(sm_state + 0x7ff0);
2004 EDX = ldq_phys(sm_state + 0x7fe8);
2005 EBX = ldq_phys(sm_state + 0x7fe0);
2006 ESP = ldq_phys(sm_state + 0x7fd8);
2007 EBP = ldq_phys(sm_state + 0x7fd0);
2008 ESI = ldq_phys(sm_state + 0x7fc8);
2009 EDI = ldq_phys(sm_state + 0x7fc0);
2010 for(i = 8; i < 16; i++)
2011 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2012 env->eip = ldq_phys(sm_state + 0x7f78);
2013 load_eflags(ldl_phys(sm_state + 0x7f70),
2014 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2015 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2016 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2017
2018 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2019 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2020 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2021
2022 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2023 if (val & 0x20000) {
2024 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2025 }
2026#else
2027 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2028 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2029 load_eflags(ldl_phys(sm_state + 0x7ff4),
2030 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2031 env->eip = ldl_phys(sm_state + 0x7ff0);
2032 EDI = ldl_phys(sm_state + 0x7fec);
2033 ESI = ldl_phys(sm_state + 0x7fe8);
2034 EBP = ldl_phys(sm_state + 0x7fe4);
2035 ESP = ldl_phys(sm_state + 0x7fe0);
2036 EBX = ldl_phys(sm_state + 0x7fdc);
2037 EDX = ldl_phys(sm_state + 0x7fd8);
2038 ECX = ldl_phys(sm_state + 0x7fd4);
2039 EAX = ldl_phys(sm_state + 0x7fd0);
2040 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2041 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2042
2043 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2044 env->tr.base = ldl_phys(sm_state + 0x7f64);
2045 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2046 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2047#ifdef VBOX
2048 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2049 env->tr.newselector = 0;
2050#endif
2051
2052 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2053 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2054 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2055 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2056#ifdef VBOX
2057 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2058 env->ldt.newselector = 0;
2059#endif
2060
2061 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2062 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2063
2064 env->idt.base = ldl_phys(sm_state + 0x7f58);
2065 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2066
2067 for(i = 0; i < 6; i++) {
2068 if (i < 3)
2069 offset = 0x7f84 + i * 12;
2070 else
2071 offset = 0x7f2c + (i - 3) * 12;
2072 cpu_x86_load_seg_cache(env, i,
2073 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2074 ldl_phys(sm_state + offset + 8),
2075 ldl_phys(sm_state + offset + 4),
2076 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2077 }
2078 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2079
2080 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2081 if (val & 0x20000) {
2082 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2083 }
2084#endif
2085 CC_OP = CC_OP_EFLAGS;
2086 env->hflags &= ~HF_SMM_MASK;
2087 cpu_smm_update(env);
2088
2089 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2090 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2091#endif /* !VBOX */
2092}
2093
2094#endif /* !CONFIG_USER_ONLY */
2095
2096
2097/* division, flags are undefined */
2098
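/*
 * DIV/IDIV helpers for the 8/16/32-bit forms. Both a zero divisor and a
 * quotient that does not fit in the destination raise EXCP00_DIVZ, i.e. the
 * single #DE vector the hardware uses for both conditions. Byte example:
 * AX=0x0305 divided by 0x10 leaves AL=0x30 (quotient) and AH=0x05
 * (remainder).
 */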
2099void helper_divb_AL(target_ulong t0)
2100{
2101 unsigned int num, den, q, r;
2102
2103 num = (EAX & 0xffff);
2104 den = (t0 & 0xff);
2105 if (den == 0) {
2106 raise_exception(EXCP00_DIVZ);
2107 }
2108 q = (num / den);
2109 if (q > 0xff)
2110 raise_exception(EXCP00_DIVZ);
2111 q &= 0xff;
2112 r = (num % den) & 0xff;
2113 EAX = (EAX & ~0xffff) | (r << 8) | q;
2114}
2115
2116void helper_idivb_AL(target_ulong t0)
2117{
2118 int num, den, q, r;
2119
2120 num = (int16_t)EAX;
2121 den = (int8_t)t0;
2122 if (den == 0) {
2123 raise_exception(EXCP00_DIVZ);
2124 }
2125 q = (num / den);
2126 if (q != (int8_t)q)
2127 raise_exception(EXCP00_DIVZ);
2128 q &= 0xff;
2129 r = (num % den) & 0xff;
2130 EAX = (EAX & ~0xffff) | (r << 8) | q;
2131}
2132
2133void helper_divw_AX(target_ulong t0)
2134{
2135 unsigned int num, den, q, r;
2136
2137 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2138 den = (t0 & 0xffff);
2139 if (den == 0) {
2140 raise_exception(EXCP00_DIVZ);
2141 }
2142 q = (num / den);
2143 if (q > 0xffff)
2144 raise_exception(EXCP00_DIVZ);
2145 q &= 0xffff;
2146 r = (num % den) & 0xffff;
2147 EAX = (EAX & ~0xffff) | q;
2148 EDX = (EDX & ~0xffff) | r;
2149}
2150
2151void helper_idivw_AX(target_ulong t0)
2152{
2153 int num, den, q, r;
2154
2155 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2156 den = (int16_t)t0;
2157 if (den == 0) {
2158 raise_exception(EXCP00_DIVZ);
2159 }
2160 q = (num / den);
2161 if (q != (int16_t)q)
2162 raise_exception(EXCP00_DIVZ);
2163 q &= 0xffff;
2164 r = (num % den) & 0xffff;
2165 EAX = (EAX & ~0xffff) | q;
2166 EDX = (EDX & ~0xffff) | r;
2167}
2168
2169void helper_divl_EAX(target_ulong t0)
2170{
2171 unsigned int den, r;
2172 uint64_t num, q;
2173
2174 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2175 den = t0;
2176 if (den == 0) {
2177 raise_exception(EXCP00_DIVZ);
2178 }
2179 q = (num / den);
2180 r = (num % den);
2181 if (q > 0xffffffff)
2182 raise_exception(EXCP00_DIVZ);
2183 EAX = (uint32_t)q;
2184 EDX = (uint32_t)r;
2185}
2186
2187void helper_idivl_EAX(target_ulong t0)
2188{
2189 int den, r;
2190 int64_t num, q;
2191
2192 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2193 den = t0;
2194 if (den == 0) {
2195 raise_exception(EXCP00_DIVZ);
2196 }
2197 q = (num / den);
2198 r = (num % den);
2199 if (q != (int32_t)q)
2200 raise_exception(EXCP00_DIVZ);
2201 EAX = (uint32_t)q;
2202 EDX = (uint32_t)r;
2203}
2204
2205/* bcd */
2206
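/*
 * ASCII/BCD adjustment helpers. helper_aam() splits AL by the immediate
 * base (AH = AL / base, AL = AL % base) and helper_aad() folds it back
 * (AL = AH * base + AL, AH = 0); e.g. AL=37 with the usual base 10 yields
 * AH=3, AL=7 after AAM. AAA/AAS/DAA/DAS adjust AL after additions or
 * subtractions of BCD digits and recompute AF/CF (plus ZF/PF/SF for
 * DAA/DAS) by hand.
 */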
2207 /* XXX: AAM with an immediate of 0 should raise #DE; base must be non-zero here */
2208void helper_aam(int base)
2209{
2210 int al, ah;
2211 al = EAX & 0xff;
2212 ah = al / base;
2213 al = al % base;
2214 EAX = (EAX & ~0xffff) | al | (ah << 8);
2215 CC_DST = al;
2216}
2217
2218void helper_aad(int base)
2219{
2220 int al, ah;
2221 al = EAX & 0xff;
2222 ah = (EAX >> 8) & 0xff;
2223 al = ((ah * base) + al) & 0xff;
2224 EAX = (EAX & ~0xffff) | al;
2225 CC_DST = al;
2226}
2227
2228void helper_aaa(void)
2229{
2230 int icarry;
2231 int al, ah, af;
2232 int eflags;
2233
2234 eflags = helper_cc_compute_all(CC_OP);
2235 af = eflags & CC_A;
2236 al = EAX & 0xff;
2237 ah = (EAX >> 8) & 0xff;
2238
2239 icarry = (al > 0xf9);
2240 if (((al & 0x0f) > 9 ) || af) {
2241 al = (al + 6) & 0x0f;
2242 ah = (ah + 1 + icarry) & 0xff;
2243 eflags |= CC_C | CC_A;
2244 } else {
2245 eflags &= ~(CC_C | CC_A);
2246 al &= 0x0f;
2247 }
2248 EAX = (EAX & ~0xffff) | al | (ah << 8);
2249 CC_SRC = eflags;
2250}
2251
2252void helper_aas(void)
2253{
2254 int icarry;
2255 int al, ah, af;
2256 int eflags;
2257
2258 eflags = helper_cc_compute_all(CC_OP);
2259 af = eflags & CC_A;
2260 al = EAX & 0xff;
2261 ah = (EAX >> 8) & 0xff;
2262
2263 icarry = (al < 6);
2264 if (((al & 0x0f) > 9 ) || af) {
2265 al = (al - 6) & 0x0f;
2266 ah = (ah - 1 - icarry) & 0xff;
2267 eflags |= CC_C | CC_A;
2268 } else {
2269 eflags &= ~(CC_C | CC_A);
2270 al &= 0x0f;
2271 }
2272 EAX = (EAX & ~0xffff) | al | (ah << 8);
2273 CC_SRC = eflags;
2274}
2275
2276void helper_daa(void)
2277{
2278 int al, al1, af, cf;
2279 int eflags;
2280
2281 eflags = helper_cc_compute_all(CC_OP);
2282 cf = eflags & CC_C;
2283 af = eflags & CC_A;
2284 al1 = al = EAX & 0xff;
2285
2286 eflags = 0;
2287 if (((al & 0x0f) > 9 ) || af) {
2288 al = (al + 6) & 0xff;
2289 eflags |= CC_A;
2290 }
2291 if ((al1 > 0x99) || cf) {
2292 al = (al + 0x60) & 0xff;
2293 eflags |= CC_C;
2294 }
2295 EAX = (EAX & ~0xff) | al;
2296 /* well, speed is not an issue here, so we compute the flags by hand */
2297 eflags |= (al == 0) << 6; /* zf */
2298 eflags |= parity_table[al]; /* pf */
2299 eflags |= (al & 0x80); /* sf */
2300 CC_SRC = eflags;
2301}
2302
2303void helper_das(void)
2304{
2305 int al, al1, af, cf;
2306 int eflags;
2307
2308 eflags = helper_cc_compute_all(CC_OP);
2309 cf = eflags & CC_C;
2310 af = eflags & CC_A;
2311 al = EAX & 0xff;
2312
2313 eflags = 0;
2314 al1 = al;
2315 if (((al & 0x0f) > 9 ) || af) {
2316 eflags |= CC_A;
2317 if (al < 6 || cf)
2318 eflags |= CC_C;
2319 al = (al - 6) & 0xff;
2320 }
2321 if ((al1 > 0x99) || cf) {
2322 al = (al - 0x60) & 0xff;
2323 eflags |= CC_C;
2324 }
2325 EAX = (EAX & ~0xff) | al;
2326 /* well, speed is not an issue here, so we compute the flags by hand */
2327 eflags |= (al == 0) << 6; /* zf */
2328 eflags |= parity_table[al]; /* pf */
2329 eflags |= (al & 0x80); /* sf */
2330 CC_SRC = eflags;
2331}
2332
2333void helper_into(int next_eip_addend)
2334{
2335 int eflags;
2336 eflags = helper_cc_compute_all(CC_OP);
2337 if (eflags & CC_O) {
2338 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2339 }
2340}
2341
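/*
 * CMPXCHG8B/CMPXCHG16B: compare EDX:EAX (resp. RDX:RAX) against the memory
 * operand; on a match store ECX:EBX (RCX:RBX) and set ZF, otherwise load
 * the memory value into EDX:EAX (RDX:RAX) and clear ZF. The store is
 * performed in both cases, and the 16-byte form requires a 16-byte aligned
 * operand (#GP otherwise).
 */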
2342void helper_cmpxchg8b(target_ulong a0)
2343{
2344 uint64_t d;
2345 int eflags;
2346
2347 eflags = helper_cc_compute_all(CC_OP);
2348 d = ldq(a0);
2349 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2350 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2351 eflags |= CC_Z;
2352 } else {
2353 /* always do the store */
2354 stq(a0, d);
2355 EDX = (uint32_t)(d >> 32);
2356 EAX = (uint32_t)d;
2357 eflags &= ~CC_Z;
2358 }
2359 CC_SRC = eflags;
2360}
2361
2362#ifdef TARGET_X86_64
2363void helper_cmpxchg16b(target_ulong a0)
2364{
2365 uint64_t d0, d1;
2366 int eflags;
2367
2368 if ((a0 & 0xf) != 0)
2369 raise_exception(EXCP0D_GPF);
2370 eflags = helper_cc_compute_all(CC_OP);
2371 d0 = ldq(a0);
2372 d1 = ldq(a0 + 8);
2373 if (d0 == EAX && d1 == EDX) {
2374 stq(a0, EBX);
2375 stq(a0 + 8, ECX);
2376 eflags |= CC_Z;
2377 } else {
2378 /* always do the store */
2379 stq(a0, d0);
2380 stq(a0 + 8, d1);
2381 EDX = d1;
2382 EAX = d0;
2383 eflags &= ~CC_Z;
2384 }
2385 CC_SRC = eflags;
2386}
2387#endif
2388
2389void helper_single_step(void)
2390{
2391#ifndef CONFIG_USER_ONLY
2392 check_hw_breakpoints(env, 1);
2393 env->dr[6] |= DR6_BS;
2394#endif
2395 raise_exception(EXCP01_DB);
2396}
2397
2398void helper_cpuid(void)
2399{
2400 uint32_t eax, ebx, ecx, edx;
2401
2402 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2403
2404 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2405 EAX = eax;
2406 EBX = ebx;
2407 ECX = ecx;
2408 EDX = edx;
2409}
2410
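/*
 * ENTER nesting support: copies the enclosing frame pointers for nesting
 * levels greater than one and stores the new frame pointer (t1) below them.
 * The surrounding pushes and the final EBP/ESP updates are presumably done
 * by the translated code, which is why ESP itself is not written back here.
 */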
2411void helper_enter_level(int level, int data32, target_ulong t1)
2412{
2413 target_ulong ssp;
2414 uint32_t esp_mask, esp, ebp;
2415
2416 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2417 ssp = env->segs[R_SS].base;
2418 ebp = EBP;
2419 esp = ESP;
2420 if (data32) {
2421 /* 32 bit */
2422 esp -= 4;
2423 while (--level) {
2424 esp -= 4;
2425 ebp -= 4;
2426 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2427 }
2428 esp -= 4;
2429 stl(ssp + (esp & esp_mask), t1);
2430 } else {
2431 /* 16 bit */
2432 esp -= 2;
2433 while (--level) {
2434 esp -= 2;
2435 ebp -= 2;
2436 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2437 }
2438 esp -= 2;
2439 stw(ssp + (esp & esp_mask), t1);
2440 }
2441}
2442
2443#ifdef TARGET_X86_64
2444void helper_enter64_level(int level, int data64, target_ulong t1)
2445{
2446 target_ulong esp, ebp;
2447 ebp = EBP;
2448 esp = ESP;
2449
2450 if (data64) {
2451 /* 64 bit */
2452 esp -= 8;
2453 while (--level) {
2454 esp -= 8;
2455 ebp -= 8;
2456 stq(esp, ldq(ebp));
2457 }
2458 esp -= 8;
2459 stq(esp, t1);
2460 } else {
2461 /* 16 bit */
2462 esp -= 2;
2463 while (--level) {
2464 esp -= 2;
2465 ebp -= 2;
2466 stw(esp, lduw(ebp));
2467 }
2468 esp -= 2;
2469 stw(esp, t1);
2470 }
2471}
2472#endif
2473
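/*
 * LLDT/LTR: in long mode, system descriptors (LDT and TSS) are 16 bytes
 * wide, hence entry_limit is 15 instead of 7 and the third descriptor
 * dword supplies bits 63:32 of the base address.
 */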
2474void helper_lldt(int selector)
2475{
2476 SegmentCache *dt;
2477 uint32_t e1, e2;
2478#ifndef VBOX
2479 int index, entry_limit;
2480#else
2481 unsigned int index, entry_limit;
2482#endif
2483 target_ulong ptr;
2484
2485#ifdef VBOX
2486 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2487 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2488#endif
2489
2490 selector &= 0xffff;
2491 if ((selector & 0xfffc) == 0) {
2492 /* XXX: NULL selector case: invalid LDT */
2493 env->ldt.base = 0;
2494 env->ldt.limit = 0;
2495#ifdef VBOX
2496 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2497 env->ldt.newselector = 0;
2498#endif
2499 } else {
2500 if (selector & 0x4)
2501 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2502 dt = &env->gdt;
2503 index = selector & ~7;
2504#ifdef TARGET_X86_64
2505 if (env->hflags & HF_LMA_MASK)
2506 entry_limit = 15;
2507 else
2508#endif
2509 entry_limit = 7;
2510 if ((index + entry_limit) > dt->limit)
2511 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2512 ptr = dt->base + index;
2513 e1 = ldl_kernel(ptr);
2514 e2 = ldl_kernel(ptr + 4);
2515 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2516 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2517 if (!(e2 & DESC_P_MASK))
2518 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2519#ifdef TARGET_X86_64
2520 if (env->hflags & HF_LMA_MASK) {
2521 uint32_t e3;
2522 e3 = ldl_kernel(ptr + 8);
2523 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2524 env->ldt.base |= (target_ulong)e3 << 32;
2525 } else
2526#endif
2527 {
2528 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2529 }
2530 }
2531 env->ldt.selector = selector;
2532#ifdef VBOX
2533 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2534 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2535#endif
2536}
2537
2538void helper_ltr(int selector)
2539{
2540 SegmentCache *dt;
2541 uint32_t e1, e2;
2542#ifndef VBOX
2543 int index, type, entry_limit;
2544#else
2545 unsigned int index;
2546 int type, entry_limit;
2547#endif
2548 target_ulong ptr;
2549
2550#ifdef VBOX
2551 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2552 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2553 env->tr.flags, (RTSEL)(selector & 0xffff)));
2554#endif
2555 selector &= 0xffff;
2556 if ((selector & 0xfffc) == 0) {
2557 /* NULL selector case: invalid TR */
2558 env->tr.base = 0;
2559 env->tr.limit = 0;
2560 env->tr.flags = 0;
2561#ifdef VBOX
2562 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2563 env->tr.newselector = 0;
2564#endif
2565 } else {
2566 if (selector & 0x4)
2567 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2568 dt = &env->gdt;
2569 index = selector & ~7;
2570#ifdef TARGET_X86_64
2571 if (env->hflags & HF_LMA_MASK)
2572 entry_limit = 15;
2573 else
2574#endif
2575 entry_limit = 7;
2576 if ((index + entry_limit) > dt->limit)
2577 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2578 ptr = dt->base + index;
2579 e1 = ldl_kernel(ptr);
2580 e2 = ldl_kernel(ptr + 4);
2581 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2582 if ((e2 & DESC_S_MASK) ||
2583 (type != 1 && type != 9))
2584 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2585 if (!(e2 & DESC_P_MASK))
2586 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2587#ifdef TARGET_X86_64
2588 if (env->hflags & HF_LMA_MASK) {
2589 uint32_t e3, e4;
2590 e3 = ldl_kernel(ptr + 8);
2591 e4 = ldl_kernel(ptr + 12);
2592 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2593 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2594 load_seg_cache_raw_dt(&env->tr, e1, e2);
2595 env->tr.base |= (target_ulong)e3 << 32;
2596 } else
2597#endif
2598 {
2599 load_seg_cache_raw_dt(&env->tr, e1, e2);
2600 }
2601 e2 |= DESC_TSS_BUSY_MASK;
2602 stl_kernel(ptr + 4, e2);
2603 }
2604 env->tr.selector = selector;
2605#ifdef VBOX
2606 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32}\n",
2607 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2608 env->tr.flags));
2609#endif
2610}
2611
2612/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2613void helper_load_seg(int seg_reg, int selector)
2614{
2615 uint32_t e1, e2;
2616 int cpl, dpl, rpl;
2617 SegmentCache *dt;
2618#ifndef VBOX
2619 int index;
2620#else
2621 unsigned int index;
2622#endif
2623 target_ulong ptr;
2624
2625 selector &= 0xffff;
2626 cpl = env->hflags & HF_CPL_MASK;
2627#ifdef VBOX
2628
2629 /* Trying to load a selector with RPL=1 while executing guest ring-0 code in raw mode? If so, clear the RPL. */
2630 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2631 {
2632 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2633 selector = selector & 0xfffc;
2634 }
2635#endif /* VBOX */
2636 if ((selector & 0xfffc) == 0) {
2637 /* null selector case */
2638 if (seg_reg == R_SS
2639#ifdef TARGET_X86_64
2640 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2641#endif
2642 )
2643 raise_exception_err(EXCP0D_GPF, 0);
2644 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2645 } else {
2646
2647 if (selector & 0x4)
2648 dt = &env->ldt;
2649 else
2650 dt = &env->gdt;
2651 index = selector & ~7;
2652 if ((index + 7) > dt->limit)
2653 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2654 ptr = dt->base + index;
2655 e1 = ldl_kernel(ptr);
2656 e2 = ldl_kernel(ptr + 4);
2657
2658 if (!(e2 & DESC_S_MASK))
2659 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2660 rpl = selector & 3;
2661 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2662 if (seg_reg == R_SS) {
2663 /* must be writable segment */
2664 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2665 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2666 if (rpl != cpl || dpl != cpl)
2667 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2668 } else {
2669 /* must be readable segment */
2670 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2671 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2672
2673 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2674 /* if not conforming code, test rights */
2675 if (dpl < cpl || dpl < rpl)
2676 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2677 }
2678 }
2679
2680 if (!(e2 & DESC_P_MASK)) {
2681 if (seg_reg == R_SS)
2682 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2683 else
2684 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2685 }
2686
2687 /* set the access bit if not already set */
2688 if (!(e2 & DESC_A_MASK)) {
2689 e2 |= DESC_A_MASK;
2690 stl_kernel(ptr + 4, e2);
2691 }
2692
2693 cpu_x86_load_seg_cache(env, seg_reg, selector,
2694 get_seg_base(e1, e2),
2695 get_seg_limit(e1, e2),
2696 e2);
2697#if 0
2698 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2699 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2700#endif
2701 }
2702}
2703
2704/* protected mode jump */
2705void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2706 int next_eip_addend)
2707{
2708 int gate_cs, type;
2709 uint32_t e1, e2, cpl, dpl, rpl, limit;
2710 target_ulong next_eip;
2711
2712#ifdef VBOX /** @todo Why do we do this? */
2713 e1 = e2 = 0;
2714#endif
2715 if ((new_cs & 0xfffc) == 0)
2716 raise_exception_err(EXCP0D_GPF, 0);
2717 if (load_segment(&e1, &e2, new_cs) != 0)
2718 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2719 cpl = env->hflags & HF_CPL_MASK;
2720 if (e2 & DESC_S_MASK) {
2721 if (!(e2 & DESC_CS_MASK))
2722 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2723 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2724 if (e2 & DESC_C_MASK) {
2725 /* conforming code segment */
2726 if (dpl > cpl)
2727 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2728 } else {
2729 /* non conforming code segment */
2730 rpl = new_cs & 3;
2731 if (rpl > cpl)
2732 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2733 if (dpl != cpl)
2734 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2735 }
2736 if (!(e2 & DESC_P_MASK))
2737 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2738 limit = get_seg_limit(e1, e2);
2739 if (new_eip > limit &&
2740 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK)))
2741 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2742 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2743 get_seg_base(e1, e2), limit, e2);
2744 EIP = new_eip;
2745 } else {
2746 /* jump to call or task gate */
2747 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2748 rpl = new_cs & 3;
2749 cpl = env->hflags & HF_CPL_MASK;
2750 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2751 switch(type) {
2752 case 1: /* 286 TSS */
2753 case 9: /* 386 TSS */
2754 case 5: /* task gate */
2755 if (dpl < cpl || dpl < rpl)
2756 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2757 next_eip = env->eip + next_eip_addend;
2758 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2759 CC_OP = CC_OP_EFLAGS;
2760 break;
2761 case 4: /* 286 call gate */
2762 case 12: /* 386 call gate */
2763 if ((dpl < cpl) || (dpl < rpl))
2764 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2765 if (!(e2 & DESC_P_MASK))
2766 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2767 gate_cs = e1 >> 16;
2768 new_eip = (e1 & 0xffff);
2769 if (type == 12)
2770 new_eip |= (e2 & 0xffff0000);
2771 if (load_segment(&e1, &e2, gate_cs) != 0)
2772 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2773 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2774 /* must be code segment */
2775 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2776 (DESC_S_MASK | DESC_CS_MASK)))
2777 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2778 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2779 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2780 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2781 if (!(e2 & DESC_P_MASK))
2782#ifdef VBOX /* See page 3-514 of 253666.pdf */
2783 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2784#else
2785 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2786#endif
2787 limit = get_seg_limit(e1, e2);
2788 if (new_eip > limit)
2789 raise_exception_err(EXCP0D_GPF, 0);
2790 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2791 get_seg_base(e1, e2), limit, e2);
2792 EIP = new_eip;
2793 break;
2794 default:
2795 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2796 break;
2797 }
2798 }
2799}
2800
2801/* real mode call */
2802void helper_lcall_real(int new_cs, target_ulong new_eip1,
2803 int shift, int next_eip)
2804{
2805 int new_eip;
2806 uint32_t esp, esp_mask;
2807 target_ulong ssp;
2808
2809 new_eip = new_eip1;
2810 esp = ESP;
2811 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2812 ssp = env->segs[R_SS].base;
2813 if (shift) {
2814 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2815 PUSHL(ssp, esp, esp_mask, next_eip);
2816 } else {
2817 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2818 PUSHW(ssp, esp, esp_mask, next_eip);
2819 }
2820
2821 SET_ESP(esp, esp_mask);
2822 env->eip = new_eip;
2823 env->segs[R_CS].selector = new_cs;
2824 env->segs[R_CS].base = (new_cs << 4);
2825}
2826
2827/* protected mode call */
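/*
 * Covers three cases: a direct far call to a code segment, a task switch
 * through a TSS or task gate, and a call through a call gate. For a call
 * gate leading to a more privileged non-conforming segment, a new stack is
 * fetched from the TSS and param_count parameters are copied over from the
 * old stack before CS:EIP is reloaded from the gate.
 */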
2828void helper_lcall_protected(int new_cs, target_ulong new_eip,
2829 int shift, int next_eip_addend)
2830{
2831 int new_stack, i;
2832 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2833 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2834 uint32_t val, limit, old_sp_mask;
2835 target_ulong ssp, old_ssp, next_eip;
2836
2837#ifdef VBOX /** @todo Why do we do this? */
2838 e1 = e2 = 0;
2839#endif
2840 next_eip = env->eip + next_eip_addend;
2841 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2842 LOG_PCALL_STATE(env);
2843 if ((new_cs & 0xfffc) == 0)
2844 raise_exception_err(EXCP0D_GPF, 0);
2845 if (load_segment(&e1, &e2, new_cs) != 0)
2846 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2847 cpl = env->hflags & HF_CPL_MASK;
2848 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2849 if (e2 & DESC_S_MASK) {
2850 if (!(e2 & DESC_CS_MASK))
2851 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2852 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2853 if (e2 & DESC_C_MASK) {
2854 /* conforming code segment */
2855 if (dpl > cpl)
2856 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2857 } else {
2858 /* non conforming code segment */
2859 rpl = new_cs & 3;
2860 if (rpl > cpl)
2861 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2862 if (dpl != cpl)
2863 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2864 }
2865 if (!(e2 & DESC_P_MASK))
2866 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2867
2868#ifdef TARGET_X86_64
2869 /* XXX: check 16/32 bit cases in long mode */
2870 if (shift == 2) {
2871 target_ulong rsp;
2872 /* 64 bit case */
2873 rsp = ESP;
2874 PUSHQ(rsp, env->segs[R_CS].selector);
2875 PUSHQ(rsp, next_eip);
2876 /* from this point, not restartable */
2877 ESP = rsp;
2878 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2879 get_seg_base(e1, e2),
2880 get_seg_limit(e1, e2), e2);
2881 EIP = new_eip;
2882 } else
2883#endif
2884 {
2885 sp = ESP;
2886 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2887 ssp = env->segs[R_SS].base;
2888 if (shift) {
2889 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2890 PUSHL(ssp, sp, sp_mask, next_eip);
2891 } else {
2892 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2893 PUSHW(ssp, sp, sp_mask, next_eip);
2894 }
2895
2896 limit = get_seg_limit(e1, e2);
2897 if (new_eip > limit)
2898 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2899 /* from this point, not restartable */
2900 SET_ESP(sp, sp_mask);
2901 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2902 get_seg_base(e1, e2), limit, e2);
2903 EIP = new_eip;
2904 }
2905 } else {
2906 /* check gate type */
2907 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2908 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2909 rpl = new_cs & 3;
2910 switch(type) {
2911 case 1: /* available 286 TSS */
2912 case 9: /* available 386 TSS */
2913 case 5: /* task gate */
2914 if (dpl < cpl || dpl < rpl)
2915 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2916 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2917 CC_OP = CC_OP_EFLAGS;
2918 return;
2919 case 4: /* 286 call gate */
2920 case 12: /* 386 call gate */
2921 break;
2922 default:
2923 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2924 break;
2925 }
2926 shift = type >> 3;
2927
2928 if (dpl < cpl || dpl < rpl)
2929 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2930 /* check valid bit */
2931 if (!(e2 & DESC_P_MASK))
2932 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2933 selector = e1 >> 16;
2934 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2935 param_count = e2 & 0x1f;
2936 if ((selector & 0xfffc) == 0)
2937 raise_exception_err(EXCP0D_GPF, 0);
2938
2939 if (load_segment(&e1, &e2, selector) != 0)
2940 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2941 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2942 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2943 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2944 if (dpl > cpl)
2945 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2946 if (!(e2 & DESC_P_MASK))
2947 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2948
2949 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2950 /* to inner privilege */
2951 get_ss_esp_from_tss(&ss, &sp, dpl);
2952 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2953 ss, sp, param_count, ESP);
2954 if ((ss & 0xfffc) == 0)
2955 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2956 if ((ss & 3) != dpl)
2957 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2958 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2959 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2960 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2961 if (ss_dpl != dpl)
2962 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2963 if (!(ss_e2 & DESC_S_MASK) ||
2964 (ss_e2 & DESC_CS_MASK) ||
2965 !(ss_e2 & DESC_W_MASK))
2966 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2967 if (!(ss_e2 & DESC_P_MASK))
2968#ifdef VBOX /* See page 3-99 of 253666.pdf */
2969 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2970#else
2971 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2972#endif
2973
2974 // push_size = ((param_count * 2) + 8) << shift;
2975
2976 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2977 old_ssp = env->segs[R_SS].base;
2978
2979 sp_mask = get_sp_mask(ss_e2);
2980 ssp = get_seg_base(ss_e1, ss_e2);
2981 if (shift) {
2982 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2983 PUSHL(ssp, sp, sp_mask, ESP);
2984 for(i = param_count - 1; i >= 0; i--) {
2985 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2986 PUSHL(ssp, sp, sp_mask, val);
2987 }
2988 } else {
2989 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2990 PUSHW(ssp, sp, sp_mask, ESP);
2991 for(i = param_count - 1; i >= 0; i--) {
2992 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2993 PUSHW(ssp, sp, sp_mask, val);
2994 }
2995 }
2996 new_stack = 1;
2997 } else {
2998 /* to same privilege */
2999 sp = ESP;
3000 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3001 ssp = env->segs[R_SS].base;
3002 // push_size = (4 << shift);
3003 new_stack = 0;
3004 }
3005
3006 if (shift) {
3007 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3008 PUSHL(ssp, sp, sp_mask, next_eip);
3009 } else {
3010 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3011 PUSHW(ssp, sp, sp_mask, next_eip);
3012 }
3013
3014 /* from this point, not restartable */
3015
3016 if (new_stack) {
3017 ss = (ss & ~3) | dpl;
3018 cpu_x86_load_seg_cache(env, R_SS, ss,
3019 ssp,
3020 get_seg_limit(ss_e1, ss_e2),
3021 ss_e2);
3022 }
3023
3024 selector = (selector & ~3) | dpl;
3025 cpu_x86_load_seg_cache(env, R_CS, selector,
3026 get_seg_base(e1, e2),
3027 get_seg_limit(e1, e2),
3028 e2);
3029 cpu_x86_set_cpl(env, dpl);
3030 SET_ESP(sp, sp_mask);
3031 EIP = offset;
3032 }
3033}
3034
3035/* real and vm86 mode iret */
3036void helper_iret_real(int shift)
3037{
3038 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3039 target_ulong ssp;
3040 int eflags_mask;
3041#ifdef VBOX
3042 bool fVME = false;
3043
3044 remR3TrapClear(env->pVM);
3045#endif /* VBOX */
3046
3047 sp_mask = 0xffff; /* XXX: use SS segment size? */
3048 sp = ESP;
3049 ssp = env->segs[R_SS].base;
3050 if (shift == 1) {
3051 /* 32 bits */
3052 POPL(ssp, sp, sp_mask, new_eip);
3053 POPL(ssp, sp, sp_mask, new_cs);
3054 new_cs &= 0xffff;
3055 POPL(ssp, sp, sp_mask, new_eflags);
3056 } else {
3057 /* 16 bits */
3058 POPW(ssp, sp, sp_mask, new_eip);
3059 POPW(ssp, sp, sp_mask, new_cs);
3060 POPW(ssp, sp, sp_mask, new_eflags);
3061 }
3062#ifdef VBOX
3063 if ( (env->eflags & VM_MASK)
3064 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3065 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3066 {
3067 fVME = true;
3068 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3069 /* if TF will be set -> #GP */
3070 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3071 || (new_eflags & TF_MASK))
3072 raise_exception(EXCP0D_GPF);
3073 }
3074#endif /* VBOX */
3075 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3076 env->segs[R_CS].selector = new_cs;
3077 env->segs[R_CS].base = (new_cs << 4);
3078 env->eip = new_eip;
3079#ifdef VBOX
3080 if (fVME)
3081 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3082 else
3083#endif
3084 if (env->eflags & VM_MASK)
3085 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3086 else
3087 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3088 if (shift == 0)
3089 eflags_mask &= 0xffff;
3090 load_eflags(new_eflags, eflags_mask);
3091 env->hflags2 &= ~HF2_NMI_MASK;
3092#ifdef VBOX
3093 if (fVME)
3094 {
3095 if (new_eflags & IF_MASK)
3096 env->eflags |= VIF_MASK;
3097 else
3098 env->eflags &= ~VIF_MASK;
3099 }
3100#endif /* VBOX */
3101}
3102
3103static inline void validate_seg(int seg_reg, int cpl)
3104{
3105 int dpl;
3106 uint32_t e2;
3107
3108 /* XXX: on x86_64, we do not want to nullify FS and GS because
3109 they may still contain a valid base. I would be interested to
3110 know how a real x86_64 CPU behaves */
3111 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3112 (env->segs[seg_reg].selector & 0xfffc) == 0)
3113 return;
3114
3115 e2 = env->segs[seg_reg].flags;
3116 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3117 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3118 /* data or non conforming code segment */
3119 if (dpl < cpl) {
3120 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3121 }
3122 }
3123}
3124
3125/* protected mode iret */
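/*
 * Common worker for far RET and IRET (is_iret != 0) with 16/32/64-bit
 * operand size selected by shift (0/1/2). A return to an outer privilege
 * level additionally pops SS:ESP and re-validates the data segment
 * registers; an IRET whose popped EFLAGS has VM set switches back to
 * virtual-8086 mode via return_to_vm86.
 */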
3126static inline void helper_ret_protected(int shift, int is_iret, int addend)
3127{
3128 uint32_t new_cs, new_eflags, new_ss;
3129 uint32_t new_es, new_ds, new_fs, new_gs;
3130 uint32_t e1, e2, ss_e1, ss_e2;
3131 int cpl, dpl, rpl, eflags_mask, iopl;
3132 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3133
3134#ifdef VBOX /** @todo Why do we do this? */
3135 ss_e1 = ss_e2 = e1 = e2 = 0;
3136#endif
3137
3138#ifdef TARGET_X86_64
3139 if (shift == 2)
3140 sp_mask = -1;
3141 else
3142#endif
3143 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3144 sp = ESP;
3145 ssp = env->segs[R_SS].base;
3146 new_eflags = 0; /* avoid warning */
3147#ifdef TARGET_X86_64
3148 if (shift == 2) {
3149 POPQ(sp, new_eip);
3150 POPQ(sp, new_cs);
3151 new_cs &= 0xffff;
3152 if (is_iret) {
3153 POPQ(sp, new_eflags);
3154 }
3155 } else
3156#endif
3157 if (shift == 1) {
3158 /* 32 bits */
3159 POPL(ssp, sp, sp_mask, new_eip);
3160 POPL(ssp, sp, sp_mask, new_cs);
3161 new_cs &= 0xffff;
3162 if (is_iret) {
3163 POPL(ssp, sp, sp_mask, new_eflags);
3164#if defined(VBOX) && defined(DEBUG)
3165 printf("iret: new CS %04X\n", new_cs);
3166 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3167 printf("iret: new EFLAGS %08X\n", new_eflags);
3168 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3169#endif
3170 if (new_eflags & VM_MASK)
3171 goto return_to_vm86;
3172 }
3173#ifdef VBOX
3174 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3175 {
3176# ifdef DEBUG
3177 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3178# endif
3179 new_cs = new_cs & 0xfffc;
3180 }
3181#endif
3182 } else {
3183 /* 16 bits */
3184 POPW(ssp, sp, sp_mask, new_eip);
3185 POPW(ssp, sp, sp_mask, new_cs);
3186 if (is_iret)
3187 POPW(ssp, sp, sp_mask, new_eflags);
3188 }
3189 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3190 new_cs, new_eip, shift, addend);
3191 LOG_PCALL_STATE(env);
3192 if ((new_cs & 0xfffc) == 0)
3193 {
3194#if defined(VBOX) && defined(DEBUG)
3195 printf("(new_cs & 0xfffc) == 0\n");
3196#endif
3197 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3198 }
3199 if (load_segment(&e1, &e2, new_cs) != 0)
3200 {
3201#if defined(VBOX) && defined(DEBUG)
3202 printf("load_segment failed\n");
3203#endif
3204 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3205 }
3206 if (!(e2 & DESC_S_MASK) ||
3207 !(e2 & DESC_CS_MASK))
3208 {
3209#if defined(VBOX) && defined(DEBUG)
3210 printf("e2 mask %08x\n", e2);
3211#endif
3212 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3213 }
3214 cpl = env->hflags & HF_CPL_MASK;
3215 rpl = new_cs & 3;
3216 if (rpl < cpl)
3217 {
3218#if defined(VBOX) && defined(DEBUG)
3219 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3220#endif
3221 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3222 }
3223 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3224 if (e2 & DESC_C_MASK) {
3225 if (dpl > rpl)
3226 {
3227#if defined(VBOX) && defined(DEBUG)
3228 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3229#endif
3230 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3231 }
3232 } else {
3233 if (dpl != rpl)
3234 {
3235#if defined(VBOX) && defined(DEBUG)
3236 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3237#endif
3238 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3239 }
3240 }
3241 if (!(e2 & DESC_P_MASK))
3242 {
3243#if defined(VBOX) && defined(DEBUG)
3244 printf("DESC_P_MASK e2=%08x\n", e2);
3245#endif
3246 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3247 }
3248
3249 sp += addend;
3250 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3251 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3252 /* return to same privilege level */
3253#ifdef VBOX
3254 if (!(e2 & DESC_A_MASK))
3255 e2 = set_segment_accessed(new_cs, e2);
3256#endif
3257 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3258 get_seg_base(e1, e2),
3259 get_seg_limit(e1, e2),
3260 e2);
3261 } else {
3262 /* return to different privilege level */
3263#ifdef TARGET_X86_64
3264 if (shift == 2) {
3265 POPQ(sp, new_esp);
3266 POPQ(sp, new_ss);
3267 new_ss &= 0xffff;
3268 } else
3269#endif
3270 if (shift == 1) {
3271 /* 32 bits */
3272 POPL(ssp, sp, sp_mask, new_esp);
3273 POPL(ssp, sp, sp_mask, new_ss);
3274 new_ss &= 0xffff;
3275 } else {
3276 /* 16 bits */
3277 POPW(ssp, sp, sp_mask, new_esp);
3278 POPW(ssp, sp, sp_mask, new_ss);
3279 }
3280 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3281 new_ss, new_esp);
3282 if ((new_ss & 0xfffc) == 0) {
3283#ifdef TARGET_X86_64
3284 /* NULL ss is allowed in long mode if cpl != 3 */
3285 /* XXX: test CS64 ? */
3286 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3287# ifdef VBOX
3288 if (!(e2 & DESC_A_MASK))
3289 e2 = set_segment_accessed(new_cs, e2);
3290# endif
3291 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3292 0, 0xffffffff,
3293 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3294 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3295 DESC_W_MASK | DESC_A_MASK);
3296 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3297 } else
3298#endif
3299 {
3300 raise_exception_err(EXCP0D_GPF, 0);
3301 }
3302 } else {
3303 if ((new_ss & 3) != rpl)
3304 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3305 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3306 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3307 if (!(ss_e2 & DESC_S_MASK) ||
3308 (ss_e2 & DESC_CS_MASK) ||
3309 !(ss_e2 & DESC_W_MASK))
3310 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3311 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3312 if (dpl != rpl)
3313 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3314 if (!(ss_e2 & DESC_P_MASK))
3315 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3316#ifdef VBOX
3317 if (!(e2 & DESC_A_MASK))
3318 e2 = set_segment_accessed(new_cs, e2);
3319 if (!(ss_e2 & DESC_A_MASK))
3320 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3321#endif
3322 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3323 get_seg_base(ss_e1, ss_e2),
3324 get_seg_limit(ss_e1, ss_e2),
3325 ss_e2);
3326 }
3327
3328 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3329 get_seg_base(e1, e2),
3330 get_seg_limit(e1, e2),
3331 e2);
3332 cpu_x86_set_cpl(env, rpl);
3333 sp = new_esp;
3334#ifdef TARGET_X86_64
3335 if (env->hflags & HF_CS64_MASK)
3336 sp_mask = -1;
3337 else
3338#endif
3339 sp_mask = get_sp_mask(ss_e2);
3340
3341 /* validate data segments */
3342 validate_seg(R_ES, rpl);
3343 validate_seg(R_DS, rpl);
3344 validate_seg(R_FS, rpl);
3345 validate_seg(R_GS, rpl);
3346
3347 sp += addend;
3348 }
3349 SET_ESP(sp, sp_mask);
3350 env->eip = new_eip;
3351 if (is_iret) {
3352 /* NOTE: 'cpl' is the _old_ CPL */
3353 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3354 if (cpl == 0)
3355#ifdef VBOX
3356 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3357#else
3358 eflags_mask |= IOPL_MASK;
3359#endif
3360 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3361 if (cpl <= iopl)
3362 eflags_mask |= IF_MASK;
3363 if (shift == 0)
3364 eflags_mask &= 0xffff;
3365 load_eflags(new_eflags, eflags_mask);
3366 }
3367 return;
3368
3369 return_to_vm86:
3370 POPL(ssp, sp, sp_mask, new_esp);
3371 POPL(ssp, sp, sp_mask, new_ss);
3372 POPL(ssp, sp, sp_mask, new_es);
3373 POPL(ssp, sp, sp_mask, new_ds);
3374 POPL(ssp, sp, sp_mask, new_fs);
3375 POPL(ssp, sp, sp_mask, new_gs);
3376
3377 /* modify processor state */
3378 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3379 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3380 load_seg_vm(R_CS, new_cs & 0xffff);
3381 cpu_x86_set_cpl(env, 3);
3382 load_seg_vm(R_SS, new_ss & 0xffff);
3383 load_seg_vm(R_ES, new_es & 0xffff);
3384 load_seg_vm(R_DS, new_ds & 0xffff);
3385 load_seg_vm(R_FS, new_fs & 0xffff);
3386 load_seg_vm(R_GS, new_gs & 0xffff);
3387
3388 env->eip = new_eip & 0xffff;
3389 ESP = new_esp;
3390}
3391
3392void helper_iret_protected(int shift, int next_eip)
3393{
3394 int tss_selector, type;
3395 uint32_t e1, e2;
3396
3397#ifdef VBOX
3398 e1 = e2 = 0; /** @todo Why do we do this? */
3399 remR3TrapClear(env->pVM);
3400#endif
3401
3402 /* specific case for TSS */
3403 if (env->eflags & NT_MASK) {
3404#ifdef TARGET_X86_64
3405 if (env->hflags & HF_LMA_MASK)
3406 raise_exception_err(EXCP0D_GPF, 0);
3407#endif
3408 tss_selector = lduw_kernel(env->tr.base + 0);
3409 if (tss_selector & 4)
3410 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3411 if (load_segment(&e1, &e2, tss_selector) != 0)
3412 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3413 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3414 /* NOTE: we check both segment and busy TSS */
3415 if (type != 3)
3416 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3417 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3418 } else {
3419 helper_ret_protected(shift, 1, 0);
3420 }
3421 env->hflags2 &= ~HF2_NMI_MASK;
3422}
3423
3424void helper_lret_protected(int shift, int addend)
3425{
3426 helper_ret_protected(shift, 0, addend);
3427}
3428
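/*
 * SYSENTER/SYSEXIT: all target selectors are derived from SYSENTER_CS.
 * SYSENTER loads CS from SYSENTER_CS and SS from SYSENTER_CS+8 with
 * ESP/EIP taken from SYSENTER_ESP/EIP; SYSEXIT returns at CPL 3 to
 * CS+16/SS+24 (or CS+32/SS+40 for a 64-bit return) with ESP=ECX and
 * EIP=EDX.
 */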
3429void helper_sysenter(void)
3430{
3431 if (env->sysenter_cs == 0) {
3432 raise_exception_err(EXCP0D_GPF, 0);
3433 }
3434 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3435 cpu_x86_set_cpl(env, 0);
3436
3437#ifdef TARGET_X86_64
3438 if (env->hflags & HF_LMA_MASK) {
3439 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3440 0, 0xffffffff,
3441 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3442 DESC_S_MASK |
3443 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3444 } else
3445#endif
3446 {
3447 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3448 0, 0xffffffff,
3449 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3450 DESC_S_MASK |
3451 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3452 }
3453 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3454 0, 0xffffffff,
3455 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3456 DESC_S_MASK |
3457 DESC_W_MASK | DESC_A_MASK);
3458 ESP = env->sysenter_esp;
3459 EIP = env->sysenter_eip;
3460}
3461
3462void helper_sysexit(int dflag)
3463{
3464 int cpl;
3465
3466 cpl = env->hflags & HF_CPL_MASK;
3467 if (env->sysenter_cs == 0 || cpl != 0) {
3468 raise_exception_err(EXCP0D_GPF, 0);
3469 }
3470 cpu_x86_set_cpl(env, 3);
3471#ifdef TARGET_X86_64
3472 if (dflag == 2) {
3473 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3474 0, 0xffffffff,
3475 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3476 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3477 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3478 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3479 0, 0xffffffff,
3480 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3481 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3482 DESC_W_MASK | DESC_A_MASK);
3483 } else
3484#endif
3485 {
3486 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3487 0, 0xffffffff,
3488 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3489 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3490 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3491 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3492 0, 0xffffffff,
3493 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3494 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3495 DESC_W_MASK | DESC_A_MASK);
3496 }
3497 ESP = ECX;
3498 EIP = EDX;
3499}
3500
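/*
 * Control and debug register helpers. CR8 reads/writes are mapped to the
 * APIC TPR (or to the virtual TPR while SVM V_INTR masking is active), and
 * writes to DR0-DR3 or DR7 re-register the corresponding hardware
 * breakpoints.
 */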
3501#if defined(CONFIG_USER_ONLY)
3502target_ulong helper_read_crN(int reg)
3503{
3504 return 0;
3505}
3506
3507void helper_write_crN(int reg, target_ulong t0)
3508{
3509}
3510
3511void helper_movl_drN_T0(int reg, target_ulong t0)
3512{
3513}
3514#else
3515target_ulong helper_read_crN(int reg)
3516{
3517 target_ulong val;
3518
3519 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3520 switch(reg) {
3521 default:
3522 val = env->cr[reg];
3523 break;
3524 case 8:
3525 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3526#ifndef VBOX
3527 val = cpu_get_apic_tpr(env->apic_state);
3528#else /* VBOX */
3529 val = cpu_get_apic_tpr(env);
3530#endif /* VBOX */
3531 } else {
3532 val = env->v_tpr;
3533 }
3534 break;
3535 }
3536 return val;
3537}
3538
3539void helper_write_crN(int reg, target_ulong t0)
3540{
3541 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3542 switch(reg) {
3543 case 0:
3544 cpu_x86_update_cr0(env, t0);
3545 break;
3546 case 3:
3547 cpu_x86_update_cr3(env, t0);
3548 break;
3549 case 4:
3550 cpu_x86_update_cr4(env, t0);
3551 break;
3552 case 8:
3553 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3554#ifndef VBOX
3555 cpu_set_apic_tpr(env->apic_state, t0);
3556#else /* VBOX */
3557 cpu_set_apic_tpr(env, t0);
3558#endif /* VBOX */
3559 }
3560 env->v_tpr = t0 & 0x0f;
3561 break;
3562 default:
3563 env->cr[reg] = t0;
3564 break;
3565 }
3566}
3567
3568void helper_movl_drN_T0(int reg, target_ulong t0)
3569{
3570 int i;
3571
3572 if (reg < 4) {
3573 hw_breakpoint_remove(env, reg);
3574 env->dr[reg] = t0;
3575 hw_breakpoint_insert(env, reg);
3576 } else if (reg == 7) {
3577 for (i = 0; i < 4; i++)
3578 hw_breakpoint_remove(env, i);
3579 env->dr[7] = t0;
3580 for (i = 0; i < 4; i++)
3581 hw_breakpoint_insert(env, i);
3582 } else
3583 env->dr[reg] = t0;
3584}
3585#endif
3586
3587void helper_lmsw(target_ulong t0)
3588{
3589 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3590 if already set to one. */
3591 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3592 helper_write_crN(0, t0);
3593}
3594
3595void helper_clts(void)
3596{
3597 env->cr[0] &= ~CR0_TS_MASK;
3598 env->hflags &= ~HF_TS_MASK;
3599}
3600
3601void helper_invlpg(target_ulong addr)
3602{
3603 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3604 tlb_flush_page(env, addr);
3605}
3606
3607void helper_rdtsc(void)
3608{
3609 uint64_t val;
3610
3611 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3612 raise_exception(EXCP0D_GPF);
3613 }
3614 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3615
3616 val = cpu_get_tsc(env) + env->tsc_offset;
3617 EAX = (uint32_t)(val);
3618 EDX = (uint32_t)(val >> 32);
3619}
3620
3621void helper_rdtscp(void)
3622{
3623 helper_rdtsc();
3624#ifndef VBOX
3625 ECX = (uint32_t)(env->tsc_aux);
3626#else /* VBOX */
3627 uint64_t val;
3628 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3629 ECX = (uint32_t)(val);
3630 else
3631 ECX = 0;
3632#endif /* VBOX */
3633}
3634
3635void helper_rdpmc(void)
3636{
3637#ifdef VBOX
3638 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3639 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3640 raise_exception(EXCP0D_GPF);
3641 }
3642 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3643 EAX = 0;
3644 EDX = 0;
3645#else /* !VBOX */
3646 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3647 raise_exception(EXCP0D_GPF);
3648 }
3649 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3650
3651 /* currently unimplemented */
3652 raise_exception_err(EXCP06_ILLOP, 0);
3653#endif /* !VBOX */
3654}
3655
3656#if defined(CONFIG_USER_ONLY)
3657void helper_wrmsr(void)
3658{
3659}
3660
3661void helper_rdmsr(void)
3662{
3663}
3664#else
3665void helper_wrmsr(void)
3666{
3667 uint64_t val;
3668
3669 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3670
3671 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3672
3673 switch((uint32_t)ECX) {
3674 case MSR_IA32_SYSENTER_CS:
3675 env->sysenter_cs = val & 0xffff;
3676 break;
3677 case MSR_IA32_SYSENTER_ESP:
3678 env->sysenter_esp = val;
3679 break;
3680 case MSR_IA32_SYSENTER_EIP:
3681 env->sysenter_eip = val;
3682 break;
3683 case MSR_IA32_APICBASE:
3684# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3685 cpu_set_apic_base(env->apic_state, val);
3686# endif
3687 break;
3688 case MSR_EFER:
3689 {
3690 uint64_t update_mask;
3691 update_mask = 0;
3692 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3693 update_mask |= MSR_EFER_SCE;
3694 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3695 update_mask |= MSR_EFER_LME;
3696 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3697 update_mask |= MSR_EFER_FFXSR;
3698 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3699 update_mask |= MSR_EFER_NXE;
3700 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3701 update_mask |= MSR_EFER_SVME;
3704 cpu_load_efer(env, (env->efer & ~update_mask) |
3705 (val & update_mask));
3706 }
3707 break;
3708 case MSR_STAR:
3709 env->star = val;
3710 break;
3711 case MSR_PAT:
3712 env->pat = val;
3713 break;
3714 case MSR_VM_HSAVE_PA:
3715 env->vm_hsave = val;
3716 break;
3717#ifdef TARGET_X86_64
3718 case MSR_LSTAR:
3719 env->lstar = val;
3720 break;
3721 case MSR_CSTAR:
3722 env->cstar = val;
3723 break;
3724 case MSR_FMASK:
3725 env->fmask = val;
3726 break;
3727 case MSR_FSBASE:
3728 env->segs[R_FS].base = val;
3729 break;
3730 case MSR_GSBASE:
3731 env->segs[R_GS].base = val;
3732 break;
3733 case MSR_KERNELGSBASE:
3734 env->kernelgsbase = val;
3735 break;
3736#endif
3737# ifndef VBOX
3738 case MSR_MTRRphysBase(0):
3739 case MSR_MTRRphysBase(1):
3740 case MSR_MTRRphysBase(2):
3741 case MSR_MTRRphysBase(3):
3742 case MSR_MTRRphysBase(4):
3743 case MSR_MTRRphysBase(5):
3744 case MSR_MTRRphysBase(6):
3745 case MSR_MTRRphysBase(7):
3746 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3747 break;
3748 case MSR_MTRRphysMask(0):
3749 case MSR_MTRRphysMask(1):
3750 case MSR_MTRRphysMask(2):
3751 case MSR_MTRRphysMask(3):
3752 case MSR_MTRRphysMask(4):
3753 case MSR_MTRRphysMask(5):
3754 case MSR_MTRRphysMask(6):
3755 case MSR_MTRRphysMask(7):
3756 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3757 break;
3758 case MSR_MTRRfix64K_00000:
3759 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3760 break;
3761 case MSR_MTRRfix16K_80000:
3762 case MSR_MTRRfix16K_A0000:
3763 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3764 break;
3765 case MSR_MTRRfix4K_C0000:
3766 case MSR_MTRRfix4K_C8000:
3767 case MSR_MTRRfix4K_D0000:
3768 case MSR_MTRRfix4K_D8000:
3769 case MSR_MTRRfix4K_E0000:
3770 case MSR_MTRRfix4K_E8000:
3771 case MSR_MTRRfix4K_F0000:
3772 case MSR_MTRRfix4K_F8000:
3773 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3774 break;
3775 case MSR_MTRRdefType:
3776 env->mtrr_deftype = val;
3777 break;
3778 case MSR_MCG_STATUS:
3779 env->mcg_status = val;
3780 break;
3781 case MSR_MCG_CTL:
3782 if ((env->mcg_cap & MCG_CTL_P)
3783 && (val == 0 || val == ~(uint64_t)0))
3784 env->mcg_ctl = val;
3785 break;
3786 case MSR_TSC_AUX:
3787 env->tsc_aux = val;
3788 break;
3789# endif /* !VBOX */
3790 default:
3791# ifndef VBOX
3792 if ((uint32_t)ECX >= MSR_MC0_CTL
3793 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3794 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3795 if ((offset & 0x3) != 0
3796 || (val == 0 || val == ~(uint64_t)0))
3797 env->mce_banks[offset] = val;
3798 break;
3799 }
3800 /* XXX: exception ? */
3801# endif
3802 break;
3803 }
3804
3805# ifdef VBOX
3806 /* call CPUM. */
3807 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3808 {
3809 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3810 }
3811# endif
3812}
3813
3814void helper_rdmsr(void)
3815{
3816 uint64_t val;
3817
3818 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3819
3820 switch((uint32_t)ECX) {
3821 case MSR_IA32_SYSENTER_CS:
3822 val = env->sysenter_cs;
3823 break;
3824 case MSR_IA32_SYSENTER_ESP:
3825 val = env->sysenter_esp;
3826 break;
3827 case MSR_IA32_SYSENTER_EIP:
3828 val = env->sysenter_eip;
3829 break;
3830 case MSR_IA32_APICBASE:
3831#ifndef VBOX
3832 val = cpu_get_apic_base(env->apic_state);
3833#else /* VBOX */
3834 val = cpu_get_apic_base(env);
3835#endif /* VBOX */
3836 break;
3837 case MSR_EFER:
3838 val = env->efer;
3839 break;
3840 case MSR_STAR:
3841 val = env->star;
3842 break;
3843 case MSR_PAT:
3844 val = env->pat;
3845 break;
3846 case MSR_VM_HSAVE_PA:
3847 val = env->vm_hsave;
3848 break;
3849# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3850 case MSR_IA32_PERF_STATUS:
3851 /* tsc_increment_by_tick */
3852 val = 1000ULL;
3853 /* CPU multiplier */
3854 val |= (((uint64_t)4ULL) << 40);
3855 break;
3856# endif /* !VBOX */
3857#ifdef TARGET_X86_64
3858 case MSR_LSTAR:
3859 val = env->lstar;
3860 break;
3861 case MSR_CSTAR:
3862 val = env->cstar;
3863 break;
3864 case MSR_FMASK:
3865 val = env->fmask;
3866 break;
3867 case MSR_FSBASE:
3868 val = env->segs[R_FS].base;
3869 break;
3870 case MSR_GSBASE:
3871 val = env->segs[R_GS].base;
3872 break;
3873 case MSR_KERNELGSBASE:
3874 val = env->kernelgsbase;
3875 break;
3876# ifndef VBOX
3877 case MSR_TSC_AUX:
3878 val = env->tsc_aux;
3879 break;
3880# endif /*!VBOX*/
3881#endif
3882# ifndef VBOX
3883 case MSR_MTRRphysBase(0):
3884 case MSR_MTRRphysBase(1):
3885 case MSR_MTRRphysBase(2):
3886 case MSR_MTRRphysBase(3):
3887 case MSR_MTRRphysBase(4):
3888 case MSR_MTRRphysBase(5):
3889 case MSR_MTRRphysBase(6):
3890 case MSR_MTRRphysBase(7):
3891 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3892 break;
3893 case MSR_MTRRphysMask(0):
3894 case MSR_MTRRphysMask(1):
3895 case MSR_MTRRphysMask(2):
3896 case MSR_MTRRphysMask(3):
3897 case MSR_MTRRphysMask(4):
3898 case MSR_MTRRphysMask(5):
3899 case MSR_MTRRphysMask(6):
3900 case MSR_MTRRphysMask(7):
3901 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3902 break;
3903 case MSR_MTRRfix64K_00000:
3904 val = env->mtrr_fixed[0];
3905 break;
3906 case MSR_MTRRfix16K_80000:
3907 case MSR_MTRRfix16K_A0000:
3908 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3909 break;
3910 case MSR_MTRRfix4K_C0000:
3911 case MSR_MTRRfix4K_C8000:
3912 case MSR_MTRRfix4K_D0000:
3913 case MSR_MTRRfix4K_D8000:
3914 case MSR_MTRRfix4K_E0000:
3915 case MSR_MTRRfix4K_E8000:
3916 case MSR_MTRRfix4K_F0000:
3917 case MSR_MTRRfix4K_F8000:
3918 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3919 break;
3920 case MSR_MTRRdefType:
3921 val = env->mtrr_deftype;
3922 break;
3923 case MSR_MTRRcap:
3924 if (env->cpuid_features & CPUID_MTRR)
3925 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3926 else
3927 /* XXX: exception ? */
3928 val = 0;
3929 break;
3930 case MSR_MCG_CAP:
3931 val = env->mcg_cap;
3932 break;
3933 case MSR_MCG_CTL:
3934 if (env->mcg_cap & MCG_CTL_P)
3935 val = env->mcg_ctl;
3936 else
3937 val = 0;
3938 break;
3939 case MSR_MCG_STATUS:
3940 val = env->mcg_status;
3941 break;
3942# endif /* !VBOX */
3943 default:
3944# ifndef VBOX
3945 if ((uint32_t)ECX >= MSR_MC0_CTL
3946 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3947 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3948 val = env->mce_banks[offset];
3949 break;
3950 }
3951 /* XXX: exception ? */
3952 val = 0;
3953# else /* VBOX */
3954 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3955 {
3956 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3957 val = 0;
3958 }
3959# endif /* VBOX */
3960 break;
3961 }
3962 EAX = (uint32_t)(val);
3963 EDX = (uint32_t)(val >> 32);
3964
3965# ifdef VBOX_STRICT
3966 if ((uint32_t)ECX != MSR_IA32_TSC) {
3967 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3968 val = 0;
3969 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3970 }
3971# endif
3972}
3973#endif
3974
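/* LSL and LAR (below): verify that the selector references a descriptor that is
   visible at the current CPL/RPL and of an acceptable type; on success ZF is set
   and the segment limit resp. the masked access-rights bytes are returned, on
   failure ZF is cleared and 0 is returned. The flag is handed back to the
   translator by rebuilding eflags into CC_SRC. */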
3975target_ulong helper_lsl(target_ulong selector1)
3976{
3977 unsigned int limit;
3978 uint32_t e1, e2, eflags, selector;
3979 int rpl, dpl, cpl, type;
3980
3981 selector = selector1 & 0xffff;
3982 eflags = helper_cc_compute_all(CC_OP);
3983 if ((selector & 0xfffc) == 0)
3984 goto fail;
3985 if (load_segment(&e1, &e2, selector) != 0)
3986 goto fail;
3987 rpl = selector & 3;
3988 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3989 cpl = env->hflags & HF_CPL_MASK;
3990 if (e2 & DESC_S_MASK) {
3991 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3992 /* conforming */
3993 } else {
3994 if (dpl < cpl || dpl < rpl)
3995 goto fail;
3996 }
3997 } else {
3998 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3999 switch(type) {
4000 case 1:
4001 case 2:
4002 case 3:
4003 case 9:
4004 case 11:
4005 break;
4006 default:
4007 goto fail;
4008 }
4009 if (dpl < cpl || dpl < rpl) {
4010 fail:
4011 CC_SRC = eflags & ~CC_Z;
4012 return 0;
4013 }
4014 }
4015 limit = get_seg_limit(e1, e2);
4016 CC_SRC = eflags | CC_Z;
4017 return limit;
4018}
4019
4020target_ulong helper_lar(target_ulong selector1)
4021{
4022 uint32_t e1, e2, eflags, selector;
4023 int rpl, dpl, cpl, type;
4024
4025 selector = selector1 & 0xffff;
4026 eflags = helper_cc_compute_all(CC_OP);
4027 if ((selector & 0xfffc) == 0)
4028 goto fail;
4029 if (load_segment(&e1, &e2, selector) != 0)
4030 goto fail;
4031 rpl = selector & 3;
4032 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4033 cpl = env->hflags & HF_CPL_MASK;
4034 if (e2 & DESC_S_MASK) {
4035 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4036 /* conforming */
4037 } else {
4038 if (dpl < cpl || dpl < rpl)
4039 goto fail;
4040 }
4041 } else {
4042 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4043 switch(type) {
4044 case 1:
4045 case 2:
4046 case 3:
4047 case 4:
4048 case 5:
4049 case 9:
4050 case 11:
4051 case 12:
4052 break;
4053 default:
4054 goto fail;
4055 }
4056 if (dpl < cpl || dpl < rpl) {
4057 fail:
4058 CC_SRC = eflags & ~CC_Z;
4059 return 0;
4060 }
4061 }
4062 CC_SRC = eflags | CC_Z;
4063 return e2 & 0x00f0ff00;
4064}
4065
4066void helper_verr(target_ulong selector1)
4067{
4068 uint32_t e1, e2, eflags, selector;
4069 int rpl, dpl, cpl;
4070
4071 selector = selector1 & 0xffff;
4072 eflags = helper_cc_compute_all(CC_OP);
4073 if ((selector & 0xfffc) == 0)
4074 goto fail;
4075 if (load_segment(&e1, &e2, selector) != 0)
4076 goto fail;
4077 if (!(e2 & DESC_S_MASK))
4078 goto fail;
4079 rpl = selector & 3;
4080 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4081 cpl = env->hflags & HF_CPL_MASK;
4082 if (e2 & DESC_CS_MASK) {
4083 if (!(e2 & DESC_R_MASK))
4084 goto fail;
4085 if (!(e2 & DESC_C_MASK)) {
4086 if (dpl < cpl || dpl < rpl)
4087 goto fail;
4088 }
4089 } else {
4090 if (dpl < cpl || dpl < rpl) {
4091 fail:
4092 CC_SRC = eflags & ~CC_Z;
4093 return;
4094 }
4095 }
4096 CC_SRC = eflags | CC_Z;
4097}
4098
4099void helper_verw(target_ulong selector1)
4100{
4101 uint32_t e1, e2, eflags, selector;
4102 int rpl, dpl, cpl;
4103
4104 selector = selector1 & 0xffff;
4105 eflags = helper_cc_compute_all(CC_OP);
4106 if ((selector & 0xfffc) == 0)
4107 goto fail;
4108 if (load_segment(&e1, &e2, selector) != 0)
4109 goto fail;
4110 if (!(e2 & DESC_S_MASK))
4111 goto fail;
4112 rpl = selector & 3;
4113 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4114 cpl = env->hflags & HF_CPL_MASK;
4115 if (e2 & DESC_CS_MASK) {
4116 goto fail;
4117 } else {
4118 if (dpl < cpl || dpl < rpl)
4119 goto fail;
4120 if (!(e2 & DESC_W_MASK)) {
4121 fail:
4122 CC_SRC = eflags & ~CC_Z;
4123 return;
4124 }
4125 }
4126 CC_SRC = eflags | CC_Z;
4127}
4128
4129/* x87 FPU helpers */
4130
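/* Record an x87 exception in the status word. If the exception is unmasked in
   the control word, also set the error-summary (ES) and busy (B) bits so that a
   following FWAIT or FNSTSW notices the pending fault. */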
4131static void fpu_set_exception(int mask)
4132{
4133 env->fpus |= mask;
4134 if (env->fpus & (~env->fpuc & FPUC_EM))
4135 env->fpus |= FPUS_SE | FPUS_B;
4136}
4137
4138static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4139{
4140 if (b == 0.0)
4141 fpu_set_exception(FPUS_ZE);
4142 return a / b;
4143}
4144
4145static void fpu_raise_exception(void)
4146{
4147 if (env->cr[0] & CR0_NE_MASK) {
4148 raise_exception(EXCP10_COPR);
4149 }
4150#if !defined(CONFIG_USER_ONLY)
4151 else {
4152 cpu_set_ferr(env);
4153 }
4154#endif
4155}
4156
4157void helper_flds_FT0(uint32_t val)
4158{
4159 union {
4160 float32 f;
4161 uint32_t i;
4162 } u;
4163 u.i = val;
4164 FT0 = float32_to_floatx(u.f, &env->fp_status);
4165}
4166
4167void helper_fldl_FT0(uint64_t val)
4168{
4169 union {
4170 float64 f;
4171 uint64_t i;
4172 } u;
4173 u.i = val;
4174 FT0 = float64_to_floatx(u.f, &env->fp_status);
4175}
4176
4177void helper_fildl_FT0(int32_t val)
4178{
4179 FT0 = int32_to_floatx(val, &env->fp_status);
4180}
4181
4182void helper_flds_ST0(uint32_t val)
4183{
4184 int new_fpstt;
4185 union {
4186 float32 f;
4187 uint32_t i;
4188 } u;
4189 new_fpstt = (env->fpstt - 1) & 7;
4190 u.i = val;
4191 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4192 env->fpstt = new_fpstt;
4193 env->fptags[new_fpstt] = 0; /* validate stack entry */
4194}
4195
4196void helper_fldl_ST0(uint64_t val)
4197{
4198 int new_fpstt;
4199 union {
4200 float64 f;
4201 uint64_t i;
4202 } u;
4203 new_fpstt = (env->fpstt - 1) & 7;
4204 u.i = val;
4205 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4206 env->fpstt = new_fpstt;
4207 env->fptags[new_fpstt] = 0; /* validate stack entry */
4208}
4209
4210void helper_fildl_ST0(int32_t val)
4211{
4212 int new_fpstt;
4213 new_fpstt = (env->fpstt - 1) & 7;
4214 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4215 env->fpstt = new_fpstt;
4216 env->fptags[new_fpstt] = 0; /* validate stack entry */
4217}
4218
4219void helper_fildll_ST0(int64_t val)
4220{
4221 int new_fpstt;
4222 new_fpstt = (env->fpstt - 1) & 7;
4223 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4224 env->fpstt = new_fpstt;
4225 env->fptags[new_fpstt] = 0; /* validate stack entry */
4226}
4227
4228#ifndef VBOX
4229uint32_t helper_fsts_ST0(void)
4230#else
4231RTCCUINTREG helper_fsts_ST0(void)
4232#endif
4233{
4234 union {
4235 float32 f;
4236 uint32_t i;
4237 } u;
4238 u.f = floatx_to_float32(ST0, &env->fp_status);
4239 return u.i;
4240}
4241
4242uint64_t helper_fstl_ST0(void)
4243{
4244 union {
4245 float64 f;
4246 uint64_t i;
4247 } u;
4248 u.f = floatx_to_float64(ST0, &env->fp_status);
4249 return u.i;
4250}
4251
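/* FIST/FISTP and FISTT with a 16-bit destination: values outside the int16_t
   range are replaced by the integer indefinite value 0x8000 (-32768), which is
   what real hardware stores in that case. */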
4252#ifndef VBOX
4253int32_t helper_fist_ST0(void)
4254#else
4255RTCCINTREG helper_fist_ST0(void)
4256#endif
4257{
4258 int32_t val;
4259 val = floatx_to_int32(ST0, &env->fp_status);
4260 if (val != (int16_t)val)
4261 val = -32768;
4262 return val;
4263}
4264
4265#ifndef VBOX
4266int32_t helper_fistl_ST0(void)
4267#else
4268RTCCINTREG helper_fistl_ST0(void)
4269#endif
4270{
4271 int32_t val;
4272 val = floatx_to_int32(ST0, &env->fp_status);
4273 return val;
4274}
4275
4276int64_t helper_fistll_ST0(void)
4277{
4278 int64_t val;
4279 val = floatx_to_int64(ST0, &env->fp_status);
4280 return val;
4281}
4282
4283#ifndef VBOX
4284int32_t helper_fistt_ST0(void)
4285#else
4286RTCCINTREG helper_fistt_ST0(void)
4287#endif
4288{
4289 int32_t val;
4290 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4291 if (val != (int16_t)val)
4292 val = -32768;
4293 return val;
4294}
4295
4296#ifndef VBOX
4297int32_t helper_fisttl_ST0(void)
4298#else
4299RTCCINTREG helper_fisttl_ST0(void)
4300#endif
4301{
4302 int32_t val;
4303 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4304 return val;
4305}
4306
4307int64_t helper_fisttll_ST0(void)
4308{
4309 int64_t val;
4310 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4311 return val;
4312}
4313
4314void helper_fldt_ST0(target_ulong ptr)
4315{
4316 int new_fpstt;
4317 new_fpstt = (env->fpstt - 1) & 7;
4318 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4319 env->fpstt = new_fpstt;
4320 env->fptags[new_fpstt] = 0; /* validate stack entry */
4321}
4322
4323void helper_fstt_ST0(target_ulong ptr)
4324{
4325 helper_fstt(ST0, ptr);
4326}
4327
4328void helper_fpush(void)
4329{
4330 fpush();
4331}
4332
4333void helper_fpop(void)
4334{
4335 fpop();
4336}
4337
4338void helper_fdecstp(void)
4339{
4340 env->fpstt = (env->fpstt - 1) & 7;
4341 env->fpus &= (~0x4700);
4342}
4343
4344void helper_fincstp(void)
4345{
4346 env->fpstt = (env->fpstt + 1) & 7;
4347 env->fpus &= (~0x4700);
4348}
4349
4350/* FPU move */
4351
4352void helper_ffree_STN(int st_index)
4353{
4354 env->fptags[(env->fpstt + st_index) & 7] = 1;
4355}
4356
4357void helper_fmov_ST0_FT0(void)
4358{
4359 ST0 = FT0;
4360}
4361
4362void helper_fmov_FT0_STN(int st_index)
4363{
4364 FT0 = ST(st_index);
4365}
4366
4367void helper_fmov_ST0_STN(int st_index)
4368{
4369 ST0 = ST(st_index);
4370}
4371
4372void helper_fmov_STN_ST0(int st_index)
4373{
4374 ST(st_index) = ST0;
4375}
4376
4377void helper_fxchg_ST0_STN(int st_index)
4378{
4379 CPU86_LDouble tmp;
4380 tmp = ST(st_index);
4381 ST(st_index) = ST0;
4382 ST0 = tmp;
4383}
4384
4385/* FPU operations */
4386
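/* floatx_compare() returns -1 (less), 0 (equal), 1 (greater) or 2 (unordered).
   Indexed with ret + 1, the tables below translate that into the x87 condition
   codes C0/C2/C3 for FCOM/FUCOM and into CF/ZF/PF for FCOMI/FUCOMI. */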
4387static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4388
4389void helper_fcom_ST0_FT0(void)
4390{
4391 int ret;
4392
4393 ret = floatx_compare(ST0, FT0, &env->fp_status);
4394 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4395}
4396
4397void helper_fucom_ST0_FT0(void)
4398{
4399 int ret;
4400
4401 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4402 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4403}
4404
4405static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4406
4407void helper_fcomi_ST0_FT0(void)
4408{
4409 int eflags;
4410 int ret;
4411
4412 ret = floatx_compare(ST0, FT0, &env->fp_status);
4413 eflags = helper_cc_compute_all(CC_OP);
4414 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4415 CC_SRC = eflags;
4416}
4417
4418void helper_fucomi_ST0_FT0(void)
4419{
4420 int eflags;
4421 int ret;
4422
4423 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4424 eflags = helper_cc_compute_all(CC_OP);
4425 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4426 CC_SRC = eflags;
4427}
4428
4429void helper_fadd_ST0_FT0(void)
4430{
4431 ST0 += FT0;
4432}
4433
4434void helper_fmul_ST0_FT0(void)
4435{
4436 ST0 *= FT0;
4437}
4438
4439void helper_fsub_ST0_FT0(void)
4440{
4441 ST0 -= FT0;
4442}
4443
4444void helper_fsubr_ST0_FT0(void)
4445{
4446 ST0 = FT0 - ST0;
4447}
4448
4449void helper_fdiv_ST0_FT0(void)
4450{
4451 ST0 = helper_fdiv(ST0, FT0);
4452}
4453
4454void helper_fdivr_ST0_FT0(void)
4455{
4456 ST0 = helper_fdiv(FT0, ST0);
4457}
4458
4459/* fp operations between STN and ST0 */
4460
4461void helper_fadd_STN_ST0(int st_index)
4462{
4463 ST(st_index) += ST0;
4464}
4465
4466void helper_fmul_STN_ST0(int st_index)
4467{
4468 ST(st_index) *= ST0;
4469}
4470
4471void helper_fsub_STN_ST0(int st_index)
4472{
4473 ST(st_index) -= ST0;
4474}
4475
4476void helper_fsubr_STN_ST0(int st_index)
4477{
4478 CPU86_LDouble *p;
4479 p = &ST(st_index);
4480 *p = ST0 - *p;
4481}
4482
4483void helper_fdiv_STN_ST0(int st_index)
4484{
4485 CPU86_LDouble *p;
4486 p = &ST(st_index);
4487 *p = helper_fdiv(*p, ST0);
4488}
4489
4490void helper_fdivr_STN_ST0(int st_index)
4491{
4492 CPU86_LDouble *p;
4493 p = &ST(st_index);
4494 *p = helper_fdiv(ST0, *p);
4495}
4496
4497/* misc FPU operations */
4498void helper_fchs_ST0(void)
4499{
4500 ST0 = floatx_chs(ST0);
4501}
4502
4503void helper_fabs_ST0(void)
4504{
4505 ST0 = floatx_abs(ST0);
4506}
4507
4508void helper_fld1_ST0(void)
4509{
4510 ST0 = f15rk[1];
4511}
4512
4513void helper_fldl2t_ST0(void)
4514{
4515 ST0 = f15rk[6];
4516}
4517
4518void helper_fldl2e_ST0(void)
4519{
4520 ST0 = f15rk[5];
4521}
4522
4523void helper_fldpi_ST0(void)
4524{
4525 ST0 = f15rk[2];
4526}
4527
4528void helper_fldlg2_ST0(void)
4529{
4530 ST0 = f15rk[3];
4531}
4532
4533void helper_fldln2_ST0(void)
4534{
4535 ST0 = f15rk[4];
4536}
4537
4538void helper_fldz_ST0(void)
4539{
4540 ST0 = f15rk[0];
4541}
4542
4543void helper_fldz_FT0(void)
4544{
4545 FT0 = f15rk[0];
4546}
4547
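/* FNSTSW returns the status word with the TOP field (bits 11-13) rebuilt from
   the current register stack top; FNSTCW simply returns the control word. */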
4548#ifndef VBOX
4549uint32_t helper_fnstsw(void)
4550#else
4551RTCCUINTREG helper_fnstsw(void)
4552#endif
4553{
4554 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4555}
4556
4557#ifndef VBOX
4558uint32_t helper_fnstcw(void)
4559#else
4560RTCCUINTREG helper_fnstcw(void)
4561#endif
4562{
4563 return env->fpuc;
4564}
4565
4566static void update_fp_status(void)
4567{
4568 int rnd_type;
4569
4570 /* set rounding mode */
4571 switch(env->fpuc & RC_MASK) {
4572 default:
4573 case RC_NEAR:
4574 rnd_type = float_round_nearest_even;
4575 break;
4576 case RC_DOWN:
4577 rnd_type = float_round_down;
4578 break;
4579 case RC_UP:
4580 rnd_type = float_round_up;
4581 break;
4582 case RC_CHOP:
4583 rnd_type = float_round_to_zero;
4584 break;
4585 }
4586 set_float_rounding_mode(rnd_type, &env->fp_status);
4587#ifdef FLOATX80
4588 switch((env->fpuc >> 8) & 3) {
4589 case 0:
4590 rnd_type = 32;
4591 break;
4592 case 2:
4593 rnd_type = 64;
4594 break;
4595 case 3:
4596 default:
4597 rnd_type = 80;
4598 break;
4599 }
4600 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4601#endif
4602}
4603
4604void helper_fldcw(uint32_t val)
4605{
4606 env->fpuc = val;
4607 update_fp_status();
4608}
4609
4610void helper_fclex(void)
4611{
4612 env->fpus &= 0x7f00;
4613}
4614
4615void helper_fwait(void)
4616{
4617 if (env->fpus & FPUS_SE)
4618 fpu_raise_exception();
4619}
4620
4621void helper_fninit(void)
4622{
4623 env->fpus = 0;
4624 env->fpstt = 0;
4625 env->fpuc = 0x37f;
4626 env->fptags[0] = 1;
4627 env->fptags[1] = 1;
4628 env->fptags[2] = 1;
4629 env->fptags[3] = 1;
4630 env->fptags[4] = 1;
4631 env->fptags[5] = 1;
4632 env->fptags[6] = 1;
4633 env->fptags[7] = 1;
4634}
4635
4636/* BCD ops */
4637
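/* FBLD/FBSTP use an 80-bit packed BCD operand: bytes 0-8 hold 18 decimal digits,
   two per byte with the low nibble being the less significant digit, and bit 7
   of byte 9 carries the sign. */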
4638void helper_fbld_ST0(target_ulong ptr)
4639{
4640 CPU86_LDouble tmp;
4641 uint64_t val;
4642 unsigned int v;
4643 int i;
4644
4645 val = 0;
4646 for(i = 8; i >= 0; i--) {
4647 v = ldub(ptr + i);
4648 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4649 }
4650 tmp = val;
4651 if (ldub(ptr + 9) & 0x80)
4652 tmp = -tmp;
4653 fpush();
4654 ST0 = tmp;
4655}
4656
4657void helper_fbst_ST0(target_ulong ptr)
4658{
4659 int v;
4660 target_ulong mem_ref, mem_end;
4661 int64_t val;
4662
4663 val = floatx_to_int64(ST0, &env->fp_status);
4664 mem_ref = ptr;
4665 mem_end = mem_ref + 9;
4666 if (val < 0) {
4667 stb(mem_end, 0x80);
4668 val = -val;
4669 } else {
4670 stb(mem_end, 0x00);
4671 }
4672 while (mem_ref < mem_end) {
4673 if (val == 0)
4674 break;
4675 v = val % 100;
4676 val = val / 100;
4677 v = ((v / 10) << 4) | (v % 10);
4678 stb(mem_ref++, v);
4679 }
4680 while (mem_ref < mem_end) {
4681 stb(mem_ref++, 0);
4682 }
4683}
4684
4685void helper_f2xm1(void)
4686{
4687 ST0 = pow(2.0,ST0) - 1.0;
4688}
4689
4690void helper_fyl2x(void)
4691{
4692 CPU86_LDouble fptemp;
4693
4694 fptemp = ST0;
4695 if (fptemp>0.0){
4696 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4697 ST1 *= fptemp;
4698 fpop();
4699 } else {
4700 env->fpus &= (~0x4700);
4701 env->fpus |= 0x400;
4702 }
4703}
4704
4705void helper_fptan(void)
4706{
4707 CPU86_LDouble fptemp;
4708
4709 fptemp = ST0;
4710 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4711 env->fpus |= 0x400;
4712 } else {
4713 ST0 = tan(fptemp);
4714 fpush();
4715 ST0 = 1.0;
4716 env->fpus &= (~0x400); /* C2 <-- 0 */
4717 /* the above code is for |arg| < 2**52 only */
4718 }
4719}
4720
4721void helper_fpatan(void)
4722{
4723 CPU86_LDouble fptemp, fpsrcop;
4724
4725 fpsrcop = ST1;
4726 fptemp = ST0;
4727 ST1 = atan2(fpsrcop,fptemp);
4728 fpop();
4729}
4730
4731void helper_fxtract(void)
4732{
4733 CPU86_LDoubleU temp;
4734 unsigned int expdif;
4735
4736 temp.d = ST0;
4737 expdif = EXPD(temp) - EXPBIAS;
4738 /*DP exponent bias*/
4739 ST0 = expdif;
4740 fpush();
4741 BIASEXPONENT(temp);
4742 ST0 = temp.d;
4743}
4744
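/* FPREM1 computes the IEEE remainder (quotient rounded to nearest), whereas
   FPREM further down truncates the quotient toward zero. Both report the three
   low quotient bits in C0/C3/C1 and set C2 when the reduction is incomplete. */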
4745void helper_fprem1(void)
4746{
4747 CPU86_LDouble dblq, fpsrcop, fptemp;
4748 CPU86_LDoubleU fpsrcop1, fptemp1;
4749 int expdif;
4750 signed long long int q;
4751
4752#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4753 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4754#else
4755 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4756#endif
4757 ST0 = 0.0 / 0.0; /* NaN */
4758 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4759 return;
4760 }
4761
4762 fpsrcop = ST0;
4763 fptemp = ST1;
4764 fpsrcop1.d = fpsrcop;
4765 fptemp1.d = fptemp;
4766 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4767
4768 if (expdif < 0) {
4769 /* optimisation? taken from the AMD docs */
4770 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4771 /* ST0 is unchanged */
4772 return;
4773 }
4774
4775 if (expdif < 53) {
4776 dblq = fpsrcop / fptemp;
4777 /* round dblq towards nearest integer */
4778 dblq = rint(dblq);
4779 ST0 = fpsrcop - fptemp * dblq;
4780
4781 /* convert dblq to q by truncating towards zero */
4782 if (dblq < 0.0)
4783 q = (signed long long int)(-dblq);
4784 else
4785 q = (signed long long int)dblq;
4786
4787 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4788 /* (C0,C3,C1) <-- (q2,q1,q0) */
4789 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4790 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4791 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4792 } else {
4793 env->fpus |= 0x400; /* C2 <-- 1 */
4794 fptemp = pow(2.0, expdif - 50);
4795 fpsrcop = (ST0 / ST1) / fptemp;
4796 /* fpsrcop = integer obtained by chopping */
4797 fpsrcop = (fpsrcop < 0.0) ?
4798 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4799 ST0 -= (ST1 * fpsrcop * fptemp);
4800 }
4801}
4802
4803void helper_fprem(void)
4804{
4805 CPU86_LDouble dblq, fpsrcop, fptemp;
4806 CPU86_LDoubleU fpsrcop1, fptemp1;
4807 int expdif;
4808 signed long long int q;
4809
4810#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4811 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4812#else
4813 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4814#endif
4815 ST0 = 0.0 / 0.0; /* NaN */
4816 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4817 return;
4818 }
4819
4820 fpsrcop = (CPU86_LDouble)ST0;
4821 fptemp = (CPU86_LDouble)ST1;
4822 fpsrcop1.d = fpsrcop;
4823 fptemp1.d = fptemp;
4824 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4825
4826 if (expdif < 0) {
4827 /* optimisation? taken from the AMD docs */
4828 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4829 /* ST0 is unchanged */
4830 return;
4831 }
4832
4833 if ( expdif < 53 ) {
4834 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4835 /* round dblq towards zero */
4836 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4837 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4838
4839 /* convert dblq to q by truncating towards zero */
4840 if (dblq < 0.0)
4841 q = (signed long long int)(-dblq);
4842 else
4843 q = (signed long long int)dblq;
4844
4845 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4846 /* (C0,C3,C1) <-- (q2,q1,q0) */
4847 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4848 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4849 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4850 } else {
4851 int N = 32 + (expdif % 32); /* as per AMD docs */
4852 env->fpus |= 0x400; /* C2 <-- 1 */
4853 fptemp = pow(2.0, (double)(expdif - N));
4854 fpsrcop = (ST0 / ST1) / fptemp;
4855 /* fpsrcop = integer obtained by chopping */
4856 fpsrcop = (fpsrcop < 0.0) ?
4857 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4858 ST0 -= (ST1 * fpsrcop * fptemp);
4859 }
4860}
4861
4862void helper_fyl2xp1(void)
4863{
4864 CPU86_LDouble fptemp;
4865
4866 fptemp = ST0;
4867 if ((fptemp+1.0)>0.0) {
4868 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4869 ST1 *= fptemp;
4870 fpop();
4871 } else {
4872 env->fpus &= (~0x4700);
4873 env->fpus |= 0x400;
4874 }
4875}
4876
4877void helper_fsqrt(void)
4878{
4879 CPU86_LDouble fptemp;
4880
4881 fptemp = ST0;
4882 if (fptemp<0.0) {
4883 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4884 env->fpus |= 0x400;
4885 }
4886 ST0 = sqrt(fptemp);
4887}
4888
4889void helper_fsincos(void)
4890{
4891 CPU86_LDouble fptemp;
4892
4893 fptemp = ST0;
4894 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4895 env->fpus |= 0x400;
4896 } else {
4897 ST0 = sin(fptemp);
4898 fpush();
4899 ST0 = cos(fptemp);
4900 env->fpus &= (~0x400); /* C2 <-- 0 */
4901 /* the above code is for |arg| < 2**63 only */
4902 }
4903}
4904
4905void helper_frndint(void)
4906{
4907 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4908}
4909
4910void helper_fscale(void)
4911{
4912 ST0 = ldexp (ST0, (int)(ST1));
4913}
4914
4915void helper_fsin(void)
4916{
4917 CPU86_LDouble fptemp;
4918
4919 fptemp = ST0;
4920 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4921 env->fpus |= 0x400;
4922 } else {
4923 ST0 = sin(fptemp);
4924 env->fpus &= (~0x400); /* C2 <-- 0 */
4925 /* the above code is for |arg| < 2**53 only */
4926 }
4927}
4928
4929void helper_fcos(void)
4930{
4931 CPU86_LDouble fptemp;
4932
4933 fptemp = ST0;
4934 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4935 env->fpus |= 0x400;
4936 } else {
4937 ST0 = cos(fptemp);
4938 env->fpus &= (~0x400); /* C2 <-- 0 */
4939 /* the above code is for |arg| < 2**63 only */
4940 }
4941}
4942
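/* FXAM classifies ST(0): C1 receives the sign and (C3,C2,C0) encode the class -
   001 NaN, 010 normal finite, 011 infinity, 100 zero, 110 denormal. Empty
   registers are not reported yet, see the fptags XXX below. */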
4943void helper_fxam_ST0(void)
4944{
4945 CPU86_LDoubleU temp;
4946 int expdif;
4947
4948 temp.d = ST0;
4949
4950 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4951 if (SIGND(temp))
4952 env->fpus |= 0x200; /* C1 <-- 1 */
4953
4954 /* XXX: test fptags too */
4955 expdif = EXPD(temp);
4956 if (expdif == MAXEXPD) {
4957#ifdef USE_X86LDOUBLE
4958 if (MANTD(temp) == 0x8000000000000000ULL)
4959#else
4960 if (MANTD(temp) == 0)
4961#endif
4962 env->fpus |= 0x500 /*Infinity*/;
4963 else
4964 env->fpus |= 0x100 /*NaN*/;
4965 } else if (expdif == 0) {
4966 if (MANTD(temp) == 0)
4967 env->fpus |= 0x4000 /*Zero*/;
4968 else
4969 env->fpus |= 0x4400 /*Denormal*/;
4970 } else {
4971 env->fpus |= 0x400;
4972 }
4973}
4974
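/* FSTENV/FNSAVE image: rebuild the full 2-bit tag word (00 valid, 01 zero,
   10 special i.e. NaN/infinity/denormal, 11 empty) from the simplified fptags[]
   array, then store control, status and tag words plus zeroed instruction and
   operand pointers in either the 32-bit or the 16-bit layout. */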
4975void helper_fstenv(target_ulong ptr, int data32)
4976{
4977 int fpus, fptag, exp, i;
4978 uint64_t mant;
4979 CPU86_LDoubleU tmp;
4980
4981 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4982 fptag = 0;
4983 for (i=7; i>=0; i--) {
4984 fptag <<= 2;
4985 if (env->fptags[i]) {
4986 fptag |= 3;
4987 } else {
4988 tmp.d = env->fpregs[i].d;
4989 exp = EXPD(tmp);
4990 mant = MANTD(tmp);
4991 if (exp == 0 && mant == 0) {
4992 /* zero */
4993 fptag |= 1;
4994 } else if (exp == 0 || exp == MAXEXPD
4995#ifdef USE_X86LDOUBLE
4996 || (mant & (1LL << 63)) == 0
4997#endif
4998 ) {
4999 /* NaNs, infinity, denormal */
5000 fptag |= 2;
5001 }
5002 }
5003 }
5004 if (data32) {
5005 /* 32 bit */
5006 stl(ptr, env->fpuc);
5007 stl(ptr + 4, fpus);
5008 stl(ptr + 8, fptag);
5009 stl(ptr + 12, 0); /* fpip */
5010 stl(ptr + 16, 0); /* fpcs */
5011 stl(ptr + 20, 0); /* fpoo */
5012 stl(ptr + 24, 0); /* fpos */
5013 } else {
5014 /* 16 bit */
5015 stw(ptr, env->fpuc);
5016 stw(ptr + 2, fpus);
5017 stw(ptr + 4, fptag);
5018 stw(ptr + 6, 0);
5019 stw(ptr + 8, 0);
5020 stw(ptr + 10, 0);
5021 stw(ptr + 12, 0);
5022 }
5023}
5024
5025void helper_fldenv(target_ulong ptr, int data32)
5026{
5027 int i, fpus, fptag;
5028
5029 if (data32) {
5030 env->fpuc = lduw(ptr);
5031 fpus = lduw(ptr + 4);
5032 fptag = lduw(ptr + 8);
5033 }
5034 else {
5035 env->fpuc = lduw(ptr);
5036 fpus = lduw(ptr + 2);
5037 fptag = lduw(ptr + 4);
5038 }
5039 env->fpstt = (fpus >> 11) & 7;
5040 env->fpus = fpus & ~0x3800;
5041 for(i = 0;i < 8; i++) {
5042 env->fptags[i] = ((fptag & 3) == 3);
5043 fptag >>= 2;
5044 }
5045}
5046
5047void helper_fsave(target_ulong ptr, int data32)
5048{
5049 CPU86_LDouble tmp;
5050 int i;
5051
5052 helper_fstenv(ptr, data32);
5053
5054 ptr += (14 << data32);
5055 for(i = 0;i < 8; i++) {
5056 tmp = ST(i);
5057 helper_fstt(tmp, ptr);
5058 ptr += 10;
5059 }
5060
5061 /* fninit */
5062 env->fpus = 0;
5063 env->fpstt = 0;
5064 env->fpuc = 0x37f;
5065 env->fptags[0] = 1;
5066 env->fptags[1] = 1;
5067 env->fptags[2] = 1;
5068 env->fptags[3] = 1;
5069 env->fptags[4] = 1;
5070 env->fptags[5] = 1;
5071 env->fptags[6] = 1;
5072 env->fptags[7] = 1;
5073}
5074
5075void helper_frstor(target_ulong ptr, int data32)
5076{
5077 CPU86_LDouble tmp;
5078 int i;
5079
5080 helper_fldenv(ptr, data32);
5081 ptr += (14 << data32);
5082
5083 for(i = 0;i < 8; i++) {
5084 tmp = helper_fldt(ptr);
5085 ST(i) = tmp;
5086 ptr += 10;
5087 }
5088}
5089
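/* FXSAVE/FXRSTOR operate on a 512-byte, 16-byte aligned area: FCW at +0x00,
   FSW at +0x02, the abridged one-bit-per-register tag word at +0x04, MXCSR at
   +0x18, ST0-ST7 in 16-byte slots from +0x20 and the XMM registers from +0xA0. */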
5090void helper_fxsave(target_ulong ptr, int data64)
5091{
5092 int fpus, fptag, i, nb_xmm_regs;
5093 CPU86_LDouble tmp;
5094 target_ulong addr;
5095
5096 /* The operand must be 16 byte aligned */
5097 if (ptr & 0xf) {
5098 raise_exception(EXCP0D_GPF);
5099 }
5100
5101 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5102 fptag = 0;
5103 for(i = 0; i < 8; i++) {
5104 fptag |= (env->fptags[i] << i);
5105 }
5106 stw(ptr, env->fpuc);
5107 stw(ptr + 2, fpus);
5108 stw(ptr + 4, fptag ^ 0xff);
5109#ifdef TARGET_X86_64
5110 if (data64) {
5111 stq(ptr + 0x08, 0); /* rip */
5112 stq(ptr + 0x10, 0); /* rdp */
5113 } else
5114#endif
5115 {
5116 stl(ptr + 0x08, 0); /* eip */
5117 stl(ptr + 0x0c, 0); /* sel */
5118 stl(ptr + 0x10, 0); /* dp */
5119 stl(ptr + 0x14, 0); /* sel */
5120 }
5121
5122 addr = ptr + 0x20;
5123 for(i = 0;i < 8; i++) {
5124 tmp = ST(i);
5125 helper_fstt(tmp, addr);
5126 addr += 16;
5127 }
5128
5129 if (env->cr[4] & CR4_OSFXSR_MASK) {
5130 /* XXX: finish it */
5131 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5132 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5133 if (env->hflags & HF_CS64_MASK)
5134 nb_xmm_regs = 16;
5135 else
5136 nb_xmm_regs = 8;
5137 addr = ptr + 0xa0;
5138 /* Fast FXSAVE leaves out the XMM registers */
5139 if (!(env->efer & MSR_EFER_FFXSR)
5140 || (env->hflags & HF_CPL_MASK)
5141 || !(env->hflags & HF_LMA_MASK)) {
5142 for(i = 0; i < nb_xmm_regs; i++) {
5143 stq(addr, env->xmm_regs[i].XMM_Q(0));
5144 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5145 addr += 16;
5146 }
5147 }
5148 }
5149}
5150
5151void helper_fxrstor(target_ulong ptr, int data64)
5152{
5153 int i, fpus, fptag, nb_xmm_regs;
5154 CPU86_LDouble tmp;
5155 target_ulong addr;
5156
5157 /* The operand must be 16 byte aligned */
5158 if (ptr & 0xf) {
5159 raise_exception(EXCP0D_GPF);
5160 }
5161
5162 env->fpuc = lduw(ptr);
5163 fpus = lduw(ptr + 2);
5164 fptag = lduw(ptr + 4);
5165 env->fpstt = (fpus >> 11) & 7;
5166 env->fpus = fpus & ~0x3800;
5167 fptag ^= 0xff;
5168 for(i = 0;i < 8; i++) {
5169 env->fptags[i] = ((fptag >> i) & 1);
5170 }
5171
5172 addr = ptr + 0x20;
5173 for(i = 0;i < 8; i++) {
5174 tmp = helper_fldt(addr);
5175 ST(i) = tmp;
5176 addr += 16;
5177 }
5178
5179 if (env->cr[4] & CR4_OSFXSR_MASK) {
5180 /* XXX: finish it */
5181 env->mxcsr = ldl(ptr + 0x18);
5182 //ldl(ptr + 0x1c);
5183 if (env->hflags & HF_CS64_MASK)
5184 nb_xmm_regs = 16;
5185 else
5186 nb_xmm_regs = 8;
5187 addr = ptr + 0xa0;
5188 /* Fast FXRSTOR leaves out the XMM registers */
5189 if (!(env->efer & MSR_EFER_FFXSR)
5190 || (env->hflags & HF_CPL_MASK)
5191 || !(env->hflags & HF_LMA_MASK)) {
5192 for(i = 0; i < nb_xmm_regs; i++) {
5193#if !defined(VBOX) || __GNUC__ < 4
5194 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5195 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5196#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5197# if 1
5198 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5199 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5200 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5201 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5202# else
5203 /* this works fine on Mac OS X, gcc 4.0.1 */
5204 uint64_t u64 = ldq(addr);
5205 env->xmm_regs[i].XMM_Q(0) = u64;
5206 u64 = ldq(addr + 8);
5207 env->xmm_regs[i].XMM_Q(1) = u64;
5208# endif
5209#endif
5210 addr += 16;
5211 }
5212 }
5213 }
5214}
5215
5216#ifndef USE_X86LDOUBLE
5217
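/* Without USE_X86LDOUBLE the FPU registers are only kept as IEEE doubles, so
   these helpers convert between the 64-bit format and the 80-bit extended
   format (explicit integer bit, 64-bit mantissa, 15-bit exponent with bias
   16383) when the register file is saved or loaded, losing precision. */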
5218void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5219{
5220 CPU86_LDoubleU temp;
5221 int e;
5222
5223 temp.d = f;
5224 /* mantissa */
5225 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5226 /* exponent + sign */
5227 e = EXPD(temp) - EXPBIAS + 16383;
5228 e |= SIGND(temp) >> 16;
5229 *pexp = e;
5230}
5231
5232CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5233{
5234 CPU86_LDoubleU temp;
5235 int e;
5236 uint64_t ll;
5237
5238 /* XXX: handle overflow ? */
5239 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5240 e |= (upper >> 4) & 0x800; /* sign */
5241 ll = (mant >> 11) & ((1LL << 52) - 1);
5242#ifdef __arm__
5243 temp.l.upper = (e << 20) | (ll >> 32);
5244 temp.l.lower = ll;
5245#else
5246 temp.ll = ll | ((uint64_t)e << 52);
5247#endif
5248 return temp.d;
5249}
5250
5251#else
5252
5253void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5254{
5255 CPU86_LDoubleU temp;
5256
5257 temp.d = f;
5258 *pmant = temp.l.lower;
5259 *pexp = temp.l.upper;
5260}
5261
5262CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5263{
5264 CPU86_LDoubleU temp;
5265
5266 temp.l.upper = upper;
5267 temp.l.lower = mant;
5268 return temp.d;
5269}
5270#endif
5271
5272#ifdef TARGET_X86_64
5273
5274//#define DEBUG_MULDIV
5275
5276static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5277{
5278 *plow += a;
5279 /* carry test */
5280 if (*plow < a)
5281 (*phigh)++;
5282 *phigh += b;
5283}
5284
5285static void neg128(uint64_t *plow, uint64_t *phigh)
5286{
5287 *plow = ~ *plow;
5288 *phigh = ~ *phigh;
5289 add128(plow, phigh, 1, 0);
5290}
5291
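/* 128-by-64 bit unsigned division used by DIV (and, via idiv64, IDIV) with a
   64-bit operand. If the high half is non-zero, a simple bit-by-bit restoring
   division is used: the dividend is shifted left one bit at a time and the
   divisor subtracted whenever it fits. Overflow is reported when the quotient
   would not fit into 64 bits, i.e. when the high half is >= the divisor. */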
5292/* return TRUE if overflow */
5293static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5294{
5295 uint64_t q, r, a1, a0;
5296 int i, qb, ab;
5297
5298 a0 = *plow;
5299 a1 = *phigh;
5300 if (a1 == 0) {
5301 q = a0 / b;
5302 r = a0 % b;
5303 *plow = q;
5304 *phigh = r;
5305 } else {
5306 if (a1 >= b)
5307 return 1;
5308 /* XXX: use a better algorithm */
5309 for(i = 0; i < 64; i++) {
5310 ab = a1 >> 63;
5311 a1 = (a1 << 1) | (a0 >> 63);
5312 if (ab || a1 >= b) {
5313 a1 -= b;
5314 qb = 1;
5315 } else {
5316 qb = 0;
5317 }
5318 a0 = (a0 << 1) | qb;
5319 }
5320#if defined(DEBUG_MULDIV)
5321 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5322 *phigh, *plow, b, a0, a1);
5323#endif
5324 *plow = a0;
5325 *phigh = a1;
5326 }
5327 return 0;
5328}
5329
5330/* return TRUE if overflow */
5331static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5332{
5333 int sa, sb;
5334 sa = ((int64_t)*phigh < 0);
5335 if (sa)
5336 neg128(plow, phigh);
5337 sb = (b < 0);
5338 if (sb)
5339 b = -b;
5340 if (div64(plow, phigh, b) != 0)
5341 return 1;
5342 if (sa ^ sb) {
5343 if (*plow > (1ULL << 63))
5344 return 1;
5345 *plow = - *plow;
5346 } else {
5347 if (*plow >= (1ULL << 63))
5348 return 1;
5349 }
5350 if (sa)
5351 *phigh = - *phigh;
5352 return 0;
5353}
5354
5355void helper_mulq_EAX_T0(target_ulong t0)
5356{
5357 uint64_t r0, r1;
5358
5359 mulu64(&r0, &r1, EAX, t0);
5360 EAX = r0;
5361 EDX = r1;
5362 CC_DST = r0;
5363 CC_SRC = r1;
5364}
5365
5366void helper_imulq_EAX_T0(target_ulong t0)
5367{
5368 uint64_t r0, r1;
5369
5370 muls64(&r0, &r1, EAX, t0);
5371 EAX = r0;
5372 EDX = r1;
5373 CC_DST = r0;
5374 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5375}
5376
5377target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5378{
5379 uint64_t r0, r1;
5380
5381 muls64(&r0, &r1, t0, t1);
5382 CC_DST = r0;
5383 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5384 return r0;
5385}
5386
5387void helper_divq_EAX(target_ulong t0)
5388{
5389 uint64_t r0, r1;
5390 if (t0 == 0) {
5391 raise_exception(EXCP00_DIVZ);
5392 }
5393 r0 = EAX;
5394 r1 = EDX;
5395 if (div64(&r0, &r1, t0))
5396 raise_exception(EXCP00_DIVZ);
5397 EAX = r0;
5398 EDX = r1;
5399}
5400
5401void helper_idivq_EAX(target_ulong t0)
5402{
5403 uint64_t r0, r1;
5404 if (t0 == 0) {
5405 raise_exception(EXCP00_DIVZ);
5406 }
5407 r0 = EAX;
5408 r1 = EDX;
5409 if (idiv64(&r0, &r1, t0))
5410 raise_exception(EXCP00_DIVZ);
5411 EAX = r0;
5412 EDX = r1;
5413}
5414#endif
5415
5416static void do_hlt(void)
5417{
5418 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5419 env->halted = 1;
5420 env->exception_index = EXCP_HLT;
5421 cpu_loop_exit();
5422}
5423
5424void helper_hlt(int next_eip_addend)
5425{
5426 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5427 EIP += next_eip_addend;
5428
5429 do_hlt();
5430}
5431
5432void helper_monitor(target_ulong ptr)
5433{
5434#ifdef VBOX
5435 if ((uint32_t)ECX > 1)
5436 raise_exception(EXCP0D_GPF);
5437#else /* !VBOX */
5438 if ((uint32_t)ECX != 0)
5439 raise_exception(EXCP0D_GPF);
5440#endif /* !VBOX */
5441 /* XXX: store address ? */
5442 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5443}
5444
5445void helper_mwait(int next_eip_addend)
5446{
5447 if ((uint32_t)ECX != 0)
5448 raise_exception(EXCP0D_GPF);
5449#ifdef VBOX
5450 helper_hlt(next_eip_addend);
5451#else /* !VBOX */
5452 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5453 EIP += next_eip_addend;
5454
5455 /* XXX: not complete but not completely erroneous */
5456 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5457 /* more than one CPU: do not sleep because another CPU may
5458 wake this one */
5459 } else {
5460 do_hlt();
5461 }
5462#endif /* !VBOX */
5463}
5464
5465void helper_debug(void)
5466{
5467 env->exception_index = EXCP_DEBUG;
5468 cpu_loop_exit();
5469}
5470
5471void helper_reset_rf(void)
5472{
5473 env->eflags &= ~RF_MASK;
5474}
5475
5476void helper_raise_interrupt(int intno, int next_eip_addend)
5477{
5478 raise_interrupt(intno, 1, 0, next_eip_addend);
5479}
5480
5481void helper_raise_exception(int exception_index)
5482{
5483 raise_exception(exception_index);
5484}
5485
5486void helper_cli(void)
5487{
5488 env->eflags &= ~IF_MASK;
5489}
5490
5491void helper_sti(void)
5492{
5493 env->eflags |= IF_MASK;
5494}
5495
5496#ifdef VBOX
5497void helper_cli_vme(void)
5498{
5499 env->eflags &= ~VIF_MASK;
5500}
5501
5502void helper_sti_vme(void)
5503{
5504 /* First check, then change eflags according to the AMD manual */
5505 if (env->eflags & VIP_MASK) {
5506 raise_exception(EXCP0D_GPF);
5507 }
5508 env->eflags |= VIF_MASK;
5509}
5510#endif /* VBOX */
5511
5512#if 0
5513/* vm86plus instructions */
5514void helper_cli_vm(void)
5515{
5516 env->eflags &= ~VIF_MASK;
5517}
5518
5519void helper_sti_vm(void)
5520{
5521 env->eflags |= VIF_MASK;
5522 if (env->eflags & VIP_MASK) {
5523 raise_exception(EXCP0D_GPF);
5524 }
5525}
5526#endif
5527
5528void helper_set_inhibit_irq(void)
5529{
5530 env->hflags |= HF_INHIBIT_IRQ_MASK;
5531}
5532
5533void helper_reset_inhibit_irq(void)
5534{
5535 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5536}
5537
5538void helper_boundw(target_ulong a0, int v)
5539{
5540 int low, high;
5541 low = ldsw(a0);
5542 high = ldsw(a0 + 2);
5543 v = (int16_t)v;
5544 if (v < low || v > high) {
5545 raise_exception(EXCP05_BOUND);
5546 }
5547}
5548
5549void helper_boundl(target_ulong a0, int v)
5550{
5551 int low, high;
5552 low = ldl(a0);
5553 high = ldl(a0 + 4);
5554 if (v < low || v > high) {
5555 raise_exception(EXCP05_BOUND);
5556 }
5557}
5558
5559static float approx_rsqrt(float a)
5560{
5561 return 1.0 / sqrt(a);
5562}
5563
5564static float approx_rcp(float a)
5565{
5566 return 1.0 / a;
5567}
5568
5569#if !defined(CONFIG_USER_ONLY)
5570
5571#define MMUSUFFIX _mmu
5572
5573#define SHIFT 0
5574#include "softmmu_template.h"
5575
5576#define SHIFT 1
5577#include "softmmu_template.h"
5578
5579#define SHIFT 2
5580#include "softmmu_template.h"
5581
5582#define SHIFT 3
5583#include "softmmu_template.h"
5584
5585#endif
5586
5587#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5588/* This code assumes that real physical addresses always fit into a host CPU
5589 register, which is wrong in general but true for our current use cases. */
5590RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5591{
5592 return remR3PhysReadS8(addr);
5593}
5594RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5595{
5596 return remR3PhysReadU8(addr);
5597}
5598void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5599{
5600 remR3PhysWriteU8(addr, val);
5601}
5602RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5603{
5604 return remR3PhysReadS16(addr);
5605}
5606RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5607{
5608 return remR3PhysReadU16(addr);
5609}
5610void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5611{
5612 remR3PhysWriteU16(addr, val);
5613}
5614RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5615{
5616 return remR3PhysReadS32(addr);
5617}
5618RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5619{
5620 return remR3PhysReadU32(addr);
5621}
5622void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5623{
5624 remR3PhysWriteU32(addr, val);
5625}
5626uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5627{
5628 return remR3PhysReadU64(addr);
5629}
5630void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5631{
5632 remR3PhysWriteU64(addr, val);
5633}
5634#endif /* VBOX */
5635
5636#if !defined(CONFIG_USER_ONLY)
5637/* try to fill the TLB and return an exception if error. If retaddr is
5638 NULL, it means that the function was called in C code (i.e. not
5639 from generated code or from helper.c) */
5640/* XXX: fix it to restore all registers */
5641void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5642{
5643 TranslationBlock *tb;
5644 int ret;
5645 uintptr_t pc;
5646 CPUX86State *saved_env;
5647
5648 /* XXX: hack to restore env in all cases, even if not called from
5649 generated code */
5650 saved_env = env;
5651 env = cpu_single_env;
5652
5653 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5654 if (ret) {
5655 if (retaddr) {
5656 /* now we have a real cpu fault */
5657 pc = (uintptr_t)retaddr;
5658 tb = tb_find_pc(pc);
5659 if (tb) {
5660 /* the PC is inside the translated code. It means that we have
5661 a virtual CPU fault */
5662 cpu_restore_state(tb, env, pc, NULL);
5663 }
5664 }
5665 raise_exception_err(env->exception_index, env->error_code);
5666 }
5667 env = saved_env;
5668}
5669#endif
5670
5671#ifdef VBOX
5672
5673/**
5674 * Correctly computes the eflags.
5675 * @returns eflags.
5676 * @param env1 CPU environment.
5677 */
5678uint32_t raw_compute_eflags(CPUX86State *env1)
5679{
5680 CPUX86State *savedenv = env;
5681 uint32_t efl;
5682 env = env1;
5683 efl = compute_eflags();
5684 env = savedenv;
5685 return efl;
5686}
5687
5688/**
5689 * Reads byte from virtual address in guest memory area.
5690 * XXX: is it working for any addresses? swapped out pages?
5691 * @returns read data byte.
5692 * @param env1 CPU environment.
5693 * @param pvAddr GC Virtual address.
5694 */
5695uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5696{
5697 CPUX86State *savedenv = env;
5698 uint8_t u8;
5699 env = env1;
5700 u8 = ldub_kernel(addr);
5701 env = savedenv;
5702 return u8;
5703}
5704
5705/**
5706 * Reads a word from a virtual address in the guest memory area.
5707 * XXX: is it working for any addresses? swapped out pages?
5708 * @returns read data word.
5709 * @param env1 CPU environment.
5710 * @param pvAddr GC Virtual address.
5711 */
5712uint16_t read_word(CPUX86State *env1, target_ulong addr)
5713{
5714 CPUX86State *savedenv = env;
5715 uint16_t u16;
5716 env = env1;
5717 u16 = lduw_kernel(addr);
5718 env = savedenv;
5719 return u16;
5720}
5721
5722/**
5723 * Reads a dword from a virtual address in the guest memory area.
5724 * XXX: is it working for any addresses? swapped out pages?
5725 * @returns read data dword.
5726 * @param env1 CPU environment.
5727 * @param pvAddr GC Virtual address.
5728 */
5729uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5730{
5731 CPUX86State *savedenv = env;
5732 uint32_t u32;
5733 env = env1;
5734 u32 = ldl_kernel(addr);
5735 env = savedenv;
5736 return u32;
5737}
5738
5739/**
5740 * Writes a byte to a virtual address in the guest memory area.
5741 * XXX: is it working for any addresses? swapped out pages?
5743 * @param env1 CPU environment.
5744 * @param pvAddr GC Virtual address.
5745 * @param val byte value
5746 */
5747void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5748{
5749 CPUX86State *savedenv = env;
5750 env = env1;
5751 stb(addr, val);
5752 env = savedenv;
5753}
5754
5755void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5756{
5757 CPUX86State *savedenv = env;
5758 env = env1;
5759 stw(addr, val);
5760 env = savedenv;
5761}
5762
5763void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5764{
5765 CPUX86State *savedenv = env;
5766 env = env1;
5767 stl(addr, val);
5768 env = savedenv;
5769}
5770
5771/**
5772 * Correctly loads selector into segment register with updating internal
5773 * qemu data/caches.
5774 * @param env1 CPU environment.
5775 * @param seg_reg Segment register.
5776 * @param selector Selector to load.
5777 */
5778void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5779{
5780 CPUX86State *savedenv = env;
5781#ifdef FORCE_SEGMENT_SYNC
5782 jmp_buf old_buf;
5783#endif
5784
5785 env = env1;
5786
5787 if ( env->eflags & X86_EFL_VM
5788 || !(env->cr[0] & X86_CR0_PE))
5789 {
5790 load_seg_vm(seg_reg, selector);
5791
5792 env = savedenv;
5793
5794 /* Successful sync. */
5795 Assert(env1->segs[seg_reg].newselector == 0);
5796 }
5797 else
5798 {
5799 /* For some reason this works even without save/restore of the jump buffer; since this
5800 code is time critical, let's not do that. */
5801#ifdef FORCE_SEGMENT_SYNC
5802 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5803#endif
5804 if (setjmp(env1->jmp_env) == 0)
5805 {
5806 if (seg_reg == R_CS)
5807 {
5808 uint32_t e1, e2;
5809 e1 = e2 = 0;
5810 load_segment(&e1, &e2, selector);
5811 cpu_x86_load_seg_cache(env, R_CS, selector,
5812 get_seg_base(e1, e2),
5813 get_seg_limit(e1, e2),
5814 e2);
5815 }
5816 else
5817 helper_load_seg(seg_reg, selector);
5818 /* We used to use tss_load_seg(seg_reg, selector), which for some reason ignored
5819 loading 0 selectors; that in turn led to subtle problems like #3588. */
5820
5821 env = savedenv;
5822
5823 /* Successful sync. */
5824 Assert(env1->segs[seg_reg].newselector == 0);
5825 }
5826 else
5827 {
5828 env = savedenv;
5829
5830 /* Postpone sync until the guest uses the selector. */
5831 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5832 env1->segs[seg_reg].newselector = selector;
5833 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5834 env1->exception_index = -1;
5835 env1->error_code = 0;
5836 env1->old_exception = -1;
5837 }
5838#ifdef FORCE_SEGMENT_SYNC
5839 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5840#endif
5841 }
5842
5843}
5844
5845DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5846{
5847 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5848}
5849
5850
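/* VBox-specific: translate exactly one guest instruction at the current CS:EIP
   into a temporary TB, execute it (repeating while EIP does not advance), and
   then free the TB again. */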
5851int emulate_single_instr(CPUX86State *env1)
5852{
5853 TranslationBlock *tb;
5854 TranslationBlock *current;
5855 int flags;
5856 uint8_t *tc_ptr;
5857 target_ulong old_eip;
5858
5859 /* ensures env is loaded! */
5860 CPUX86State *savedenv = env;
5861 env = env1;
5862
5863 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5864
5865 current = env->current_tb;
5866 env->current_tb = NULL;
5867 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5868
5869 /*
5870 * Translate only one instruction.
5871 */
5872 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5873 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5874 env->segs[R_CS].base, flags, 0);
5875
5876 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5877
5878
5879 /* tb_link_phys: */
5880 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5881 tb->jmp_next[0] = NULL;
5882 tb->jmp_next[1] = NULL;
5883 Assert(tb->jmp_next[0] == NULL);
5884 Assert(tb->jmp_next[1] == NULL);
5885 if (tb->tb_next_offset[0] != 0xffff)
5886 tb_reset_jump(tb, 0);
5887 if (tb->tb_next_offset[1] != 0xffff)
5888 tb_reset_jump(tb, 1);
5889
5890 /*
5891 * Execute it using emulation
5892 */
5893 old_eip = env->eip;
5894 env->current_tb = tb;
5895
5896 /*
5897 * EIP remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside
5898 * the generated code - perhaps not a very safe hack.
5899 */
5900 while (old_eip == env->eip)
5901 {
5902 tc_ptr = tb->tc_ptr;
5903
5904#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5905 int fake_ret;
5906 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5907#else
5908 tcg_qemu_tb_exec(tc_ptr);
5909#endif
5910
5911 /*
5912 * Exit once we detect an external interrupt and interrupts are enabled
5913 */
5914 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
5915 || ( (env->eflags & IF_MASK)
5916 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
5917 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
5918 )
5919 {
5920 break;
5921 }
5922 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
5923 tlb_flush(env, true);
5924 }
5925 }
5926 env->current_tb = current;
5927
5928 tb_phys_invalidate(tb, -1);
5929 tb_free(tb);
5930/*
5931 Assert(tb->tb_next_offset[0] == 0xffff);
5932 Assert(tb->tb_next_offset[1] == 0xffff);
5933 Assert(tb->tb_next[0] == 0xffff);
5934 Assert(tb->tb_next[1] == 0xffff);
5935 Assert(tb->jmp_next[0] == NULL);
5936 Assert(tb->jmp_next[1] == NULL);
5937 Assert(tb->jmp_first == NULL); */
5938
5939 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5940
5941 /*
5942 * Execute the next instruction when we encounter instruction fusing.
5943 */
5944 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5945 {
5946 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5947 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5948 emulate_single_instr(env);
5949 }
5950
5951 env = savedenv;
5952 return 0;
5953}
5954
5955/**
5956 * Correctly loads a new ldtr selector.
5957 *
5958 * @param env1 CPU environment.
5959 * @param selector Selector to load.
5960 */
5961void sync_ldtr(CPUX86State *env1, int selector)
5962{
5963 CPUX86State *saved_env = env;
5964 if (setjmp(env1->jmp_env) == 0)
5965 {
5966 env = env1;
5967 helper_lldt(selector);
5968 env = saved_env;
5969 }
5970 else
5971 {
5972 env = saved_env;
5973#ifdef VBOX_STRICT
5974 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5975#endif
5976 }
5977}
5978
5979int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5980 uint32_t *esp_ptr, int dpl)
5981{
5982 int type, index, shift;
5983
5984 CPUX86State *savedenv = env;
5985 env = env1;
5986
5987 if (!(env->tr.flags & DESC_P_MASK))
5988 cpu_abort(env, "invalid tss");
5989 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5990 if ((type & 7) != 1)
5991 cpu_abort(env, "invalid tss type %d", type);
5992 shift = type >> 3;
5993 index = (dpl * 4 + 2) << shift;
5994 if (index + (4 << shift) - 1 > env->tr.limit)
5995 {
5996 env = savedenv;
5997 return 0;
5998 }
5999 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6000
6001 if (shift == 0) {
6002 *esp_ptr = lduw_kernel(env->tr.base + index);
6003 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6004 } else {
6005 *esp_ptr = ldl_kernel(env->tr.base + index);
6006 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6007 }
6008
6009 env = savedenv;
6010 return 1;
6011}
6012
6013//*****************************************************************************
6014// Needs to be at the bottom of the file (overriding macros)
6015
6016static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6017{
6018#ifdef USE_X86LDOUBLE
6019 CPU86_LDoubleU tmp;
6020 tmp.l.lower = *(uint64_t const *)ptr;
6021 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6022 return tmp.d;
6023#else
6024# error "Busted FPU saving/restoring!"
6025 return *(CPU86_LDouble *)ptr;
6026#endif
6027}
6028
6029static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6030{
6031#ifdef USE_X86LDOUBLE
6032 CPU86_LDoubleU tmp;
6033 tmp.d = f;
6034 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6035 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6036 *(uint16_t *)(ptr + 10) = 0;
6037 *(uint32_t *)(ptr + 12) = 0;
6038 AssertCompile(sizeof(long double) > 8);
6039#else
6040# error "Busted FPU saving/restoring!"
6041 *(CPU86_LDouble *)ptr = f;
6042#endif
6043}
6044
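/* From here on stw/stl/stq (and further down lduw/ldl/ldq) are redefined as
   plain host-pointer accesses, because restore_raw_fp_state() and
   save_raw_fp_state() operate on a host-side buffer rather than on guest
   memory. */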
6045#undef stw
6046#undef stl
6047#undef stq
6048#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6049#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6050#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6051
6052//*****************************************************************************
6053void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6054{
6055 int fpus, fptag, i, nb_xmm_regs;
6056 CPU86_LDouble tmp;
6057 uint8_t *addr;
6058 int data64 = !!(env->hflags & HF_LMA_MASK);
6059
6060 if (env->cpuid_features & CPUID_FXSR)
6061 {
6062 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6063 fptag = 0;
6064 for(i = 0; i < 8; i++) {
6065 fptag |= (env->fptags[i] << i);
6066 }
6067 stw(ptr, env->fpuc);
6068 stw(ptr + 2, fpus);
6069 stw(ptr + 4, fptag ^ 0xff);
6070
6071 addr = ptr + 0x20;
6072 for(i = 0;i < 8; i++) {
6073 tmp = ST(i);
6074 helper_fstt_raw(tmp, addr);
6075 addr += 16;
6076 }
6077
6078 if (env->cr[4] & CR4_OSFXSR_MASK) {
6079 /* XXX: finish it */
6080 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6081 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6082 nb_xmm_regs = 8 << data64;
6083 addr = ptr + 0xa0;
6084 for(i = 0; i < nb_xmm_regs; i++) {
6085#if __GNUC__ < 4
6086 stq(addr, env->xmm_regs[i].XMM_Q(0));
6087 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6088#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6089 stl(addr, env->xmm_regs[i].XMM_L(0));
6090 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6091 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6092 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6093#endif
6094 addr += 16;
6095 }
6096 }
6097 }
6098 else
6099 {
6100 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6101 int fptag;
6102
6103 fp->FCW = env->fpuc;
6104 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6105 fptag = 0;
6106 for (i=7; i>=0; i--) {
6107 fptag <<= 2;
6108 if (env->fptags[i]) {
6109 fptag |= 3;
6110 } else {
6111 /* the FPU automatically computes it */
6112 }
6113 }
6114 fp->FTW = fptag;
6115
6116 for(i = 0;i < 8; i++) {
6117 tmp = ST(i);
6118 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6119 }
6120 }
6121}
6122
6123//*****************************************************************************
6124#undef lduw
6125#undef ldl
6126#undef ldq
6127#define lduw(a) *(uint16_t *)(a)
6128#define ldl(a) *(uint32_t *)(a)
6129#define ldq(a) *(uint64_t *)(a)
6130//*****************************************************************************
6131void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6132{
6133 int i, fpus, fptag, nb_xmm_regs;
6134 CPU86_LDouble tmp;
6135 uint8_t *addr;
6136 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6137
6138 if (env->cpuid_features & CPUID_FXSR)
6139 {
6140 env->fpuc = lduw(ptr);
6141 fpus = lduw(ptr + 2);
6142 fptag = lduw(ptr + 4);
6143 env->fpstt = (fpus >> 11) & 7;
6144 env->fpus = fpus & ~0x3800;
6145 fptag ^= 0xff;
6146 for(i = 0;i < 8; i++) {
6147 env->fptags[i] = ((fptag >> i) & 1);
6148 }
6149
6150 addr = ptr + 0x20;
6151 for(i = 0;i < 8; i++) {
6152 tmp = helper_fldt_raw(addr);
6153 ST(i) = tmp;
6154 addr += 16;
6155 }
6156
6157 if (env->cr[4] & CR4_OSFXSR_MASK) {
6158 /* XXX: finish it, endianness */
6159 env->mxcsr = ldl(ptr + 0x18);
6160 //ldl(ptr + 0x1c);
6161 nb_xmm_regs = 8 << data64;
6162 addr = ptr + 0xa0;
6163 for(i = 0; i < nb_xmm_regs; i++) {
6164#if HC_ARCH_BITS == 32
6165 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6166 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6167 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6168 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6169 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6170#else
6171 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6172 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6173#endif
6174 addr += 16;
6175 }
6176 }
6177 }
6178 else
6179 {
6180 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6181 int fptag, j;
6182
6183 env->fpuc = fp->FCW;
6184 env->fpstt = (fp->FSW >> 11) & 7;
6185 env->fpus = fp->FSW & ~0x3800;
6186 fptag = fp->FTW;
6187 for(i = 0;i < 8; i++) {
6188 env->fptags[i] = ((fptag & 3) == 3);
6189 fptag >>= 2;
6190 }
6191 j = env->fpstt;
6192 for(i = 0;i < 8; i++) {
6193 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6194 ST(i) = tmp;
6195 }
6196 }
6197}
6198//*****************************************************************************
6199//*****************************************************************************
6200
6201#endif /* VBOX */
6202
6203/* Secure Virtual Machine helpers */
6204
6205#if defined(CONFIG_USER_ONLY)
6206
6207void helper_vmrun(int aflag, int next_eip_addend)
6208{
6209}
6210void helper_vmmcall(void)
6211{
6212}
6213void helper_vmload(int aflag)
6214{
6215}
6216void helper_vmsave(int aflag)
6217{
6218}
6219void helper_stgi(void)
6220{
6221}
6222void helper_clgi(void)
6223{
6224}
6225void helper_skinit(void)
6226{
6227}
6228void helper_invlpga(int aflag)
6229{
6230}
6231void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6232{
6233}
6234void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6235{
6236}
6237
6238void helper_svm_check_io(uint32_t port, uint32_t param,
6239 uint32_t next_eip_addend)
6240{
6241}
6242#else
6243
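/* The VMCB stores segment attributes in a packed 12-bit form: descriptor bits
   8-15 map to attrib bits 0-7 and descriptor bits 20-23 to attrib bits 8-11,
   which is what the shifts below implement in both directions. */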
6244static inline void svm_save_seg(target_phys_addr_t addr,
6245 const SegmentCache *sc)
6246{
6247 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6248 sc->selector);
6249 stq_phys(addr + offsetof(struct vmcb_seg, base),
6250 sc->base);
6251 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6252 sc->limit);
6253 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6254 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6255}
6256
6257static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6258{
6259 unsigned int flags;
6260
6261 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6262 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6263 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6264 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6265 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6266}
6267
6268static inline void svm_load_seg_cache(target_phys_addr_t addr,
6269 CPUState *env, int seg_reg)
6270{
6271 SegmentCache sc1, *sc = &sc1;
6272 svm_load_seg(addr, sc);
6273 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6274 sc->base, sc->limit, sc->flags);
6275}
6276
6277void helper_vmrun(int aflag, int next_eip_addend)
6278{
6279 target_ulong addr;
6280 uint32_t event_inj;
6281 uint32_t int_ctl;
6282
6283 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6284
6285 if (aflag == 2)
6286 addr = EAX;
6287 else
6288 addr = (uint32_t)EAX;
6289
6290 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6291
6292 env->vm_vmcb = addr;
6293
6294 /* save the current CPU state in the hsave page */
6295 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6296 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6297
6298 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6299 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6300
6301 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6302 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6303 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6304 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6305 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6306 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6307
6308 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6309 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6310
6311 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6312 &env->segs[R_ES]);
6313 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6314 &env->segs[R_CS]);
6315 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6316 &env->segs[R_SS]);
6317 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6318 &env->segs[R_DS]);
6319
6320 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6321 EIP + next_eip_addend);
6322 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6323 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6324
6325 /* load the interception bitmaps so we do not need to access the
6326 vmcb in svm mode */
6327 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6328 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6329 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6330 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6331 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6332 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6333
6334 /* enable intercepts */
6335 env->hflags |= HF_SVMI_MASK;
6336
6337 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6338
6339 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6340 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6341
6342 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6343 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6344
6345 /* clear exit_info_2 so we behave like the real hardware */
6346 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6347
6348 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6349 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6350 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6351 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6352 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6353 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6354 if (int_ctl & V_INTR_MASKING_MASK) {
6355 env->v_tpr = int_ctl & V_TPR_MASK;
6356 env->hflags2 |= HF2_VINTR_MASK;
6357 if (env->eflags & IF_MASK)
6358 env->hflags2 |= HF2_HIF_MASK;
6359 }
6360
6361 cpu_load_efer(env,
6362 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6363 env->eflags = 0;
6364 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6365 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6366 CC_OP = CC_OP_EFLAGS;
6367
6368 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6369 env, R_ES);
6370 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6371 env, R_CS);
6372 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6373 env, R_SS);
6374 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6375 env, R_DS);
6376
6377 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6378 env->eip = EIP;
6379 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6380 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6381 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6382 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6383 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6384
6385 /* FIXME: guest state consistency checks */
6386
6387 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6388 case TLB_CONTROL_DO_NOTHING:
6389 break;
6390 case TLB_CONTROL_FLUSH_ALL_ASID:
6391 /* FIXME: this is not 100% correct but should work for now */
6392 tlb_flush(env, 1);
6393 break;
6394 }
6395
6396 env->hflags2 |= HF2_GIF_MASK;
6397
6398 if (int_ctl & V_IRQ_MASK) {
6399 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6400 }
6401
6402 /* maybe we need to inject an event */
6403 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
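/* event_inj encodes a valid bit, the vector and the delivery type of an
   event to inject on VMRUN; any error code comes from event_inj_err. */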
6404 if (event_inj & SVM_EVTINJ_VALID) {
6405 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6406 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6407 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6408
6409 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6410 /* FIXME: need to implement valid_err */
6411 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6412 case SVM_EVTINJ_TYPE_INTR:
6413 env->exception_index = vector;
6414 env->error_code = event_inj_err;
6415 env->exception_is_int = 0;
6416 env->exception_next_eip = -1;
6417 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6418 /* XXX: is this always correct? */
6419 do_interrupt(vector, 0, 0, 0, 1);
6420 break;
6421 case SVM_EVTINJ_TYPE_NMI:
6422 env->exception_index = EXCP02_NMI;
6423 env->error_code = event_inj_err;
6424 env->exception_is_int = 0;
6425 env->exception_next_eip = EIP;
6426 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6427 cpu_loop_exit();
6428 break;
6429 case SVM_EVTINJ_TYPE_EXEPT:
6430 env->exception_index = vector;
6431 env->error_code = event_inj_err;
6432 env->exception_is_int = 0;
6433 env->exception_next_eip = -1;
6434 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6435 cpu_loop_exit();
6436 break;
6437 case SVM_EVTINJ_TYPE_SOFT:
6438 env->exception_index = vector;
6439 env->error_code = event_inj_err;
6440 env->exception_is_int = 1;
6441 env->exception_next_eip = EIP;
6442 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6443 cpu_loop_exit();
6444 break;
6445 }
6446 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6447 }
6448}
6449
6450void helper_vmmcall(void)
6451{
6452 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6453 raise_exception(EXCP06_ILLOP);
6454}
6455
6456void helper_vmload(int aflag)
6457{
6458 target_ulong addr;
6459 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6460
6461 if (aflag == 2)
6462 addr = EAX;
6463 else
6464 addr = (uint32_t)EAX;
6465
6466 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6467 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6468 env->segs[R_FS].base);
6469
6470 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6471 env, R_FS);
6472 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6473 env, R_GS);
6474 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6475 &env->tr);
6476 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6477 &env->ldt);
6478
6479#ifdef TARGET_X86_64
6480 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6481 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6482 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6483 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6484#endif
6485 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6486 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6487 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6488 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6489}
6490
6491void helper_vmsave(int aflag)
6492{
6493 target_ulong addr;
6494 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6495
6496 if (aflag == 2)
6497 addr = EAX;
6498 else
6499 addr = (uint32_t)EAX;
6500
6501 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6502 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6503 env->segs[R_FS].base);
6504
6505 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6506 &env->segs[R_FS]);
6507 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6508 &env->segs[R_GS]);
6509 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6510 &env->tr);
6511 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6512 &env->ldt);
6513
6514#ifdef TARGET_X86_64
6515 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6516 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6517 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6518 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6519#endif
6520 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6521 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6522 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6523 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6524}
6525
6526void helper_stgi(void)
6527{
6528 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6529 env->hflags2 |= HF2_GIF_MASK;
6530}
6531
6532void helper_clgi(void)
6533{
6534 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6535 env->hflags2 &= ~HF2_GIF_MASK;
6536}
6537
6538void helper_skinit(void)
6539{
6540 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6541 /* XXX: not implemented */
6542 raise_exception(EXCP06_ILLOP);
6543}
6544
6545void helper_invlpga(int aflag)
6546{
6547 target_ulong addr;
6548 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6549
6550 if (aflag == 2)
6551 addr = EAX;
6552 else
6553 addr = (uint32_t)EAX;
6554
6555 /* XXX: could use the ASID to decide whether the flush is
6556 needed at all */
6557 tlb_flush_page(env, addr);
6558}
6559
6560void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6561{
6562 if (likely(!(env->hflags & HF_SVMI_MASK)))
6563 return;
6564#ifndef VBOX
6565 switch(type) {
6566 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6567 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6568 helper_vmexit(type, param);
6569 }
6570 break;
6571 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6572 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6573 helper_vmexit(type, param);
6574 }
6575 break;
6576 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6577 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6578 helper_vmexit(type, param);
6579 }
6580 break;
6581 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6582 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6583 helper_vmexit(type, param);
6584 }
6585 break;
6586 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6587 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6588 helper_vmexit(type, param);
6589 }
6590 break;
6591 case SVM_EXIT_MSR:
6592 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6593 /* FIXME: this should be read in at vmrun (faster this way?) */
6594 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6595 uint32_t t0, t1;
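/* The MSR permission map holds two intercept bits per MSR (read and
   write); t1 is computed as the byte offset into the map and t0 as the
   bit offset within that byte, with param selecting read (0) or write (1). */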
6596 switch((uint32_t)ECX) {
6597 case 0 ... 0x1fff:
6598 t0 = (ECX * 2) % 8;
6599 t1 = ECX / 8;
6600 break;
6601 case 0xc0000000 ... 0xc0001fff:
6602 t0 = (8192 + ECX - 0xc0000000) * 2;
6603 t1 = (t0 / 8);
6604 t0 %= 8;
6605 break;
6606 case 0xc0010000 ... 0xc0011fff:
6607 t0 = (16384 + ECX - 0xc0010000) * 2;
6608 t1 = (t0 / 8);
6609 t0 %= 8;
6610 break;
6611 default:
6612 helper_vmexit(type, param);
6613 t0 = 0;
6614 t1 = 0;
6615 break;
6616 }
6617 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6618 helper_vmexit(type, param);
6619 }
6620 break;
6621 default:
6622 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6623 helper_vmexit(type, param);
6624 }
6625 break;
6626 }
6627#else /* VBOX */
6628 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6629#endif /* VBOX */
6630}
6631
6632void helper_svm_check_io(uint32_t port, uint32_t param,
6633 uint32_t next_eip_addend)
6634{
6635 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6636 /* FIXME: this should be read in at vmrun (faster this way?) */
6637 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6638 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
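/* The IOPM has one intercept bit per I/O port; (param >> 4) & 7 is the
   access size in bytes, so mask covers that many consecutive port bits
   starting at the bit for 'port'. */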
6639 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6640 /* next EIP */
6641 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6642 env->eip + next_eip_addend);
6643 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6644 }
6645 }
6646}
6647
6648/* Note: currently only 32 bits of exit_code are used */
6649void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6650{
6651 uint32_t int_ctl;
6652
6653 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6654 exit_code, exit_info_1,
6655 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6656 EIP);
6657
6658 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6659 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6660 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6661 } else {
6662 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6663 }
6664
6665 /* Save the VM state in the vmcb */
6666 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6667 &env->segs[R_ES]);
6668 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6669 &env->segs[R_CS]);
6670 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6671 &env->segs[R_SS]);
6672 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6673 &env->segs[R_DS]);
6674
6675 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6676 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6677
6678 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6679 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6680
6681 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6682 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6683 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6684 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6685 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6686
6687 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6688 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6689 int_ctl |= env->v_tpr & V_TPR_MASK;
6690 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6691 int_ctl |= V_IRQ_MASK;
6692 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6693
6694 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6695 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6696 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6697 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6698 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6699 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6700 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6701
6702 /* Reload the host state from vm_hsave */
6703 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6704 env->hflags &= ~HF_SVMI_MASK;
6705 env->intercept = 0;
6706 env->intercept_exceptions = 0;
6707 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6708 env->tsc_offset = 0;
6709
6710 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6711 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6712
6713 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6714 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6715
6716 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6717 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6718 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6719 /* EFER needs to be set after the CRs so the hidden flags get
6720 set properly */
6721 cpu_load_efer(env,
6722 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6723 env->eflags = 0;
6724 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6725 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6726 CC_OP = CC_OP_EFLAGS;
6727
6728 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6729 env, R_ES);
6730 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6731 env, R_CS);
6732 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6733 env, R_SS);
6734 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6735 env, R_DS);
6736
6737 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6738 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6739 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6740
6741 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6742 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6743
6744 /* other setups */
6745 cpu_x86_set_cpl(env, 0);
6746 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6747 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6748
6749 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6750 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6751 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6752 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6753 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6754
6755 env->hflags2 &= ~HF2_GIF_MASK;
6756 /* FIXME: Resets the current ASID register to zero (host ASID). */
6757
6758 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6759
6760 /* Clears the TSC_OFFSET inside the processor. */
6761
6762 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6763 from the page table indicated by the host's CR3. If the PDPEs contain
6764 illegal state, the processor causes a shutdown. */
6765
6766 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6767 env->cr[0] |= CR0_PE_MASK;
6768 env->eflags &= ~VM_MASK;
6769
6770 /* Disables all breakpoints in the host DR7 register. */
6771
6772 /* Checks the reloaded host state for consistency. */
6773
6774 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6775 host's code segment or non-canonical (in the case of long mode), a
6776 #GP fault is delivered inside the host. */
6777
6778 /* remove any pending exception */
6779 env->exception_index = -1;
6780 env->error_code = 0;
6781 env->old_exception = -1;
6782
6783 cpu_loop_exit();
6784}
6785
6786#endif
6787
6788/* MMX/SSE */
6789 /* XXX: optimize by storing fpstt and fptags in the static cpu state */
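/* MMX aliases the x87 register file: entering MMX mode resets the stack
   top and marks every fptags[] entry as valid (0), while EMMS marks them
   all as empty (1) again. */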
6790void helper_enter_mmx(void)
6791{
6792 env->fpstt = 0;
6793 *(uint32_t *)(env->fptags) = 0;
6794 *(uint32_t *)(env->fptags + 4) = 0;
6795}
6796
6797void helper_emms(void)
6798{
6799 /* set to empty state */
6800 *(uint32_t *)(env->fptags) = 0x01010101;
6801 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6802}
6803
6804 /* XXX: remove this helper */
6805void helper_movq(void *d, void *s)
6806{
6807 *(uint64_t *)d = *(uint64_t *)s;
6808}
6809
6810#define SHIFT 0
6811#include "ops_sse.h"
6812
6813#define SHIFT 1
6814#include "ops_sse.h"
6815
6816#define SHIFT 0
6817#include "helper_template.h"
6818#undef SHIFT
6819
6820#define SHIFT 1
6821#include "helper_template.h"
6822#undef SHIFT
6823
6824#define SHIFT 2
6825#include "helper_template.h"
6826#undef SHIFT
6827
6828#ifdef TARGET_X86_64
6829
6830#define SHIFT 3
6831#include "helper_template.h"
6832#undef SHIFT
6833
6834#endif
6835
6836/* bit operations */
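/* Index of the least significant set bit; callers must pass t0 != 0 or
   the loop below never terminates. */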
6837target_ulong helper_bsf(target_ulong t0)
6838{
6839 int count;
6840 target_ulong res;
6841
6842 res = t0;
6843 count = 0;
6844 while ((res & 1) == 0) {
6845 count++;
6846 res >>= 1;
6847 }
6848 return count;
6849}
6850
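/* Leading-zero count relative to a wordsize-bit operand; with wordsize == 0
   it instead returns the bit index of the most significant set bit (this is
   what helper_bsr() below relies on). */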
6851target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6852{
6853 int count;
6854 target_ulong res, mask;
6855
6856 if (wordsize > 0 && t0 == 0) {
6857 return wordsize;
6858 }
6859 res = t0;
6860 count = TARGET_LONG_BITS - 1;
6861 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6862 while ((res & mask) == 0) {
6863 count--;
6864 res <<= 1;
6865 }
6866 if (wordsize > 0) {
6867 return wordsize - 1 - count;
6868 }
6869 return count;
6870}
6871
6872target_ulong helper_bsr(target_ulong t0)
6873{
6874 return helper_lzcnt(t0, 0);
6875}
6876
6877static int compute_all_eflags(void)
6878{
6879 return CC_SRC;
6880}
6881
6882static int compute_c_eflags(void)
6883{
6884 return CC_SRC & CC_C;
6885}
6886
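/* Lazy flag evaluation: CC_OP records the last flag-setting operation and
   CC_SRC/CC_DST its operands; these helpers rebuild the arithmetic flags
   from that state on demand. */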
6887uint32_t helper_cc_compute_all(int op)
6888{
6889 switch (op) {
6890 default: /* should never happen */ return 0;
6891
6892 case CC_OP_EFLAGS: return compute_all_eflags();
6893
6894 case CC_OP_MULB: return compute_all_mulb();
6895 case CC_OP_MULW: return compute_all_mulw();
6896 case CC_OP_MULL: return compute_all_mull();
6897
6898 case CC_OP_ADDB: return compute_all_addb();
6899 case CC_OP_ADDW: return compute_all_addw();
6900 case CC_OP_ADDL: return compute_all_addl();
6901
6902 case CC_OP_ADCB: return compute_all_adcb();
6903 case CC_OP_ADCW: return compute_all_adcw();
6904 case CC_OP_ADCL: return compute_all_adcl();
6905
6906 case CC_OP_SUBB: return compute_all_subb();
6907 case CC_OP_SUBW: return compute_all_subw();
6908 case CC_OP_SUBL: return compute_all_subl();
6909
6910 case CC_OP_SBBB: return compute_all_sbbb();
6911 case CC_OP_SBBW: return compute_all_sbbw();
6912 case CC_OP_SBBL: return compute_all_sbbl();
6913
6914 case CC_OP_LOGICB: return compute_all_logicb();
6915 case CC_OP_LOGICW: return compute_all_logicw();
6916 case CC_OP_LOGICL: return compute_all_logicl();
6917
6918 case CC_OP_INCB: return compute_all_incb();
6919 case CC_OP_INCW: return compute_all_incw();
6920 case CC_OP_INCL: return compute_all_incl();
6921
6922 case CC_OP_DECB: return compute_all_decb();
6923 case CC_OP_DECW: return compute_all_decw();
6924 case CC_OP_DECL: return compute_all_decl();
6925
6926 case CC_OP_SHLB: return compute_all_shlb();
6927 case CC_OP_SHLW: return compute_all_shlw();
6928 case CC_OP_SHLL: return compute_all_shll();
6929
6930 case CC_OP_SARB: return compute_all_sarb();
6931 case CC_OP_SARW: return compute_all_sarw();
6932 case CC_OP_SARL: return compute_all_sarl();
6933
6934#ifdef TARGET_X86_64
6935 case CC_OP_MULQ: return compute_all_mulq();
6936
6937 case CC_OP_ADDQ: return compute_all_addq();
6938
6939 case CC_OP_ADCQ: return compute_all_adcq();
6940
6941 case CC_OP_SUBQ: return compute_all_subq();
6942
6943 case CC_OP_SBBQ: return compute_all_sbbq();
6944
6945 case CC_OP_LOGICQ: return compute_all_logicq();
6946
6947 case CC_OP_INCQ: return compute_all_incq();
6948
6949 case CC_OP_DECQ: return compute_all_decq();
6950
6951 case CC_OP_SHLQ: return compute_all_shlq();
6952
6953 case CC_OP_SARQ: return compute_all_sarq();
6954#endif
6955 }
6956}
6957
6958uint32_t helper_cc_compute_c(int op)
6959{
6960 switch (op) {
6961 default: /* should never happen */ return 0;
6962
6963 case CC_OP_EFLAGS: return compute_c_eflags();
6964
6965 case CC_OP_MULB: return compute_c_mull();
6966 case CC_OP_MULW: return compute_c_mull();
6967 case CC_OP_MULL: return compute_c_mull();
6968
6969 case CC_OP_ADDB: return compute_c_addb();
6970 case CC_OP_ADDW: return compute_c_addw();
6971 case CC_OP_ADDL: return compute_c_addl();
6972
6973 case CC_OP_ADCB: return compute_c_adcb();
6974 case CC_OP_ADCW: return compute_c_adcw();
6975 case CC_OP_ADCL: return compute_c_adcl();
6976
6977 case CC_OP_SUBB: return compute_c_subb();
6978 case CC_OP_SUBW: return compute_c_subw();
6979 case CC_OP_SUBL: return compute_c_subl();
6980
6981 case CC_OP_SBBB: return compute_c_sbbb();
6982 case CC_OP_SBBW: return compute_c_sbbw();
6983 case CC_OP_SBBL: return compute_c_sbbl();
6984
6985 case CC_OP_LOGICB: return compute_c_logicb();
6986 case CC_OP_LOGICW: return compute_c_logicw();
6987 case CC_OP_LOGICL: return compute_c_logicl();
6988
6989 case CC_OP_INCB: return compute_c_incl();
6990 case CC_OP_INCW: return compute_c_incl();
6991 case CC_OP_INCL: return compute_c_incl();
6992
6993 case CC_OP_DECB: return compute_c_incl();
6994 case CC_OP_DECW: return compute_c_incl();
6995 case CC_OP_DECL: return compute_c_incl();
6996
6997 case CC_OP_SHLB: return compute_c_shlb();
6998 case CC_OP_SHLW: return compute_c_shlw();
6999 case CC_OP_SHLL: return compute_c_shll();
7000
7001 case CC_OP_SARB: return compute_c_sarl();
7002 case CC_OP_SARW: return compute_c_sarl();
7003 case CC_OP_SARL: return compute_c_sarl();
7004
7005#ifdef TARGET_X86_64
7006 case CC_OP_MULQ: return compute_c_mull();
7007
7008 case CC_OP_ADDQ: return compute_c_addq();
7009
7010 case CC_OP_ADCQ: return compute_c_adcq();
7011
7012 case CC_OP_SUBQ: return compute_c_subq();
7013
7014 case CC_OP_SBBQ: return compute_c_sbbq();
7015
7016 case CC_OP_LOGICQ: return compute_c_logicq();
7017
7018 case CC_OP_INCQ: return compute_c_incl();
7019
7020 case CC_OP_DECQ: return compute_c_incl();
7021
7022 case CC_OP_SHLQ: return compute_c_shlq();
7023
7024 case CC_OP_SARQ: return compute_c_sarl();
7025#endif
7026 }
7027}