VirtualBox

source: vbox/trunk/src/recompiler_new/cpu-exec.c@13762

Last change on this file since 13762 was 13726, checked in by vboxsync, 16 years ago

Some 32-bit work, not yet working

  • Property svn:eol-style set to native
File size: 60.1 KB
1/*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#define CPU_NO_GLOBAL_REGS
31#include "exec.h"
32#include "disas.h"
33#include "tcg.h"
34
35#if !defined(CONFIG_SOFTMMU)
36#undef EAX
37#undef ECX
38#undef EDX
39#undef EBX
40#undef ESP
41#undef EBP
42#undef ESI
43#undef EDI
44#undef EIP
45#include <signal.h>
46#include <sys/ucontext.h>
47#endif
48
49#if defined(__sparc__) && !defined(HOST_SOLARIS)
50// Work around ugly bugs in glibc that mangle global register contents
51#undef env
52#define env cpu_single_env
53#endif
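/* With the alias in place, every use of "env" below goes through the
   memory-backed cpu_single_env instead of a host global register. */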
54
55int tb_invalidated_flag;
56
57//#define DEBUG_EXEC
58//#define DEBUG_SIGNAL
59
60
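/* cpu_loop_exit() never returns: the longjmp() below unwinds straight
   back to the setjmp(env->jmp_env) in cpu_exec(), which then re-enters
   its exception-dispatch path at the top of the outer for(;;) loop. */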
61void cpu_loop_exit(void)
62{
63 /* NOTE: the registers at this point must be saved by hand because
64    longjmp restores them */
65 regs_to_env();
66 longjmp(env->jmp_env, 1);
67}
68
69#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
70#define reg_T2
71#endif
72
73/* exit the current TB from a signal handler. The host registers are
74 restored in a state compatible with the CPU emulator
75 */
76void cpu_resume_from_signal(CPUState *env1, void *puc)
77{
78#if !defined(CONFIG_SOFTMMU)
79 struct ucontext *uc = puc;
80#endif
81
82 env = env1;
83
84 /* XXX: restore cpu registers saved in host registers */
85
86#if !defined(CONFIG_SOFTMMU)
87 if (puc) {
88 /* XXX: use siglongjmp ? */
89 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
90 }
91#endif
92 longjmp(env->jmp_env, 1);
93}
94
95/* Execute the code without caching the generated code. An interpreter
96 could be used if available. */
97static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
98{
99 unsigned long next_tb;
100 TranslationBlock *tb;
101
102 /* Should never happen.
103 We only end up here when an existing TB is too long. */
104 if (max_cycles > CF_COUNT_MASK)
105 max_cycles = CF_COUNT_MASK;
106
107 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
108 max_cycles);
109 env->current_tb = tb;
110 /* execute the generated code */
111 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
112
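    /* tcg_qemu_tb_exec() returns the last TB pointer with exit flags in
       its two low bits; the value 2 there indicates that the instruction
       counter expired rather than a normal chained-jump exit. */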
113 if ((next_tb & 3) == 2) {
114    /* Restore PC. This may happen if an async event occurs before
115 the TB starts executing. */
116 CPU_PC_FROM_TB(env, tb);
117 }
118 tb_phys_invalidate(tb, -1);
119 tb_free(tb);
120}
121
122static TranslationBlock *tb_find_slow(target_ulong pc,
123 target_ulong cs_base,
124 uint64_t flags)
125{
126 TranslationBlock *tb, **ptb1;
127 unsigned int h;
128 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
129
130 tb_invalidated_flag = 0;
131
132 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
133
134 /* find translated block using physical mappings */
135 phys_pc = get_phys_addr_code(env, pc);
136 phys_page1 = phys_pc & TARGET_PAGE_MASK;
137 phys_page2 = -1;
138 h = tb_phys_hash_func(phys_pc);
139 ptb1 = &tb_phys_hash[h];
140 for(;;) {
141 tb = *ptb1;
142 if (!tb)
143 goto not_found;
144 if (tb->pc == pc &&
145 tb->page_addr[0] == phys_page1 &&
146 tb->cs_base == cs_base &&
147 tb->flags == flags) {
148 /* check next page if needed */
149 if (tb->page_addr[1] != -1) {
150 virt_page2 = (pc & TARGET_PAGE_MASK) +
151 TARGET_PAGE_SIZE;
152 phys_page2 = get_phys_addr_code(env, virt_page2);
153 if (tb->page_addr[1] == phys_page2)
154 goto found;
155 } else {
156 goto found;
157 }
158 }
159 ptb1 = &tb->phys_hash_next;
160 }
161 not_found:
162 /* if no translated code available, then translate it now */
163 tb = tb_gen_code(env, pc, cs_base, flags, 0);
164
165 found:
166 /* we add the TB in the virtual pc hash table */
167 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
168 return tb;
169}
170
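/* tb_find_fast() first probes the per-CPU virtual-PC cache
   (env->tb_jmp_cache); only on a miss does it fall back to
   tb_find_slow() above, which searches the physically indexed
   tb_phys_hash and, failing that, translates a fresh block. */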
171#ifndef VBOX
172static inline TranslationBlock *tb_find_fast(void)
173#else
174DECLINLINE(TranslationBlock *) tb_find_fast(void)
175#endif
176{
177 TranslationBlock *tb;
178 target_ulong cs_base, pc;
179 uint64_t flags;
180
181 /* we record a subset of the CPU state. It will
182 always be the same before a given translated block
183 is executed. */
184#if defined(TARGET_I386)
185 flags = env->hflags;
186 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
187 cs_base = env->segs[R_CS].base;
188 pc = cs_base + env->eip;
189#elif defined(TARGET_ARM)
190 flags = env->thumb | (env->vfp.vec_len << 1)
191 | (env->vfp.vec_stride << 4);
192 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
193 flags |= (1 << 6);
194 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
195 flags |= (1 << 7);
196 flags |= (env->condexec_bits << 8);
197 cs_base = 0;
198 pc = env->regs[15];
199#elif defined(TARGET_SPARC)
200#ifdef TARGET_SPARC64
201 // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
202 flags = ((env->pstate & PS_AM) << 2)
203 | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
204 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
205#else
206 // FPU enable . Supervisor
207 flags = (env->psref << 4) | env->psrs;
208#endif
209 cs_base = env->npc;
210 pc = env->pc;
211#elif defined(TARGET_PPC)
212 flags = env->hflags;
213 cs_base = 0;
214 pc = env->nip;
215#elif defined(TARGET_MIPS)
216 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
217 cs_base = 0;
218 pc = env->active_tc.PC;
219#elif defined(TARGET_M68K)
220 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
221 | (env->sr & SR_S) /* Bit 13 */
222 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
223 cs_base = 0;
224 pc = env->pc;
225#elif defined(TARGET_SH4)
226 flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
227 | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */
228 | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
229 | (env->sr & (SR_MD | SR_RB)); /* Bits 29-30 */
230 cs_base = 0;
231 pc = env->pc;
232#elif defined(TARGET_ALPHA)
233 flags = env->ps;
234 cs_base = 0;
235 pc = env->pc;
236#elif defined(TARGET_CRIS)
237 flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
238 flags |= env->dslot;
239 cs_base = 0;
240 pc = env->pc;
241#else
242#error unsupported CPU
243#endif
244 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
245 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
246 tb->flags != flags)) {
247 tb = tb_find_slow(pc, cs_base, flags);
248 }
249 return tb;
250}
251
252/* main execution loop */
253
254#ifdef VBOX
255
256int cpu_exec(CPUState *env1)
257{
258#define DECLARE_HOST_REGS 1
259#include "hostregs_helper.h"
260 int ret, interrupt_request;
261 TranslationBlock *tb;
262 uint8_t *tc_ptr;
263 unsigned long next_tb;
264
265 cpu_single_env = env1;
266
267 /* first we save global registers */
268#define SAVE_HOST_REGS 1
269#include "hostregs_helper.h"
270 env = env1;
271
272 env_to_regs();
273#if defined(TARGET_I386)
274 /* put eflags in CPU temporary format */
275 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
276 DF = 1 - (2 * ((env->eflags >> 10) & 1));
277 CC_OP = CC_OP_EFLAGS;
278 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
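    /* QEMU computes x86 condition codes lazily: CC_SRC and CC_OP record
       the last flag-setting operation, and the architectural EFLAGS bits
       are only materialized on demand via cc_table[CC_OP].compute_all()
       (see the exit path at the bottom of this function). */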
279#elif defined(TARGET_SPARC)
280#elif defined(TARGET_M68K)
281 env->cc_op = CC_OP_FLAGS;
282 env->cc_dest = env->sr & 0xf;
283 env->cc_x = (env->sr >> 4) & 1;
284#elif defined(TARGET_ALPHA)
285#elif defined(TARGET_ARM)
286#elif defined(TARGET_PPC)
287#elif defined(TARGET_MIPS)
288#elif defined(TARGET_SH4)
289#elif defined(TARGET_CRIS)
290 /* XXXXX */
291#else
292#error unsupported target CPU
293#endif
294#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
295 env->exception_index = -1;
296#endif
297
298 /* prepare setjmp context for exception handling */
299 for(;;) {
300 if (setjmp(env->jmp_env) == 0)
301 {
302 env->current_tb = NULL;
303 VMMR3Unlock(env->pVM);
304 VMMR3Lock(env->pVM);
305
306 /*
307 * Check for fatal errors first
308 */
309 if (env->interrupt_request & CPU_INTERRUPT_RC) {
310 env->exception_index = EXCP_RC;
311 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
312 ret = env->exception_index;
313 cpu_loop_exit();
314 }
315
316 /* if an exception is pending, we execute it here */
317 if (env->exception_index >= 0) {
318 Assert(!env->user_mode_only);
319 if (env->exception_index >= EXCP_INTERRUPT) {
320 /* exit request from the cpu execution loop */
321 ret = env->exception_index;
322 break;
323 } else {
324 /* simulate a real cpu exception. On i386, it can
325 trigger new exceptions, but we do not handle
326 double or triple faults yet. */
327 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
328 Log(("do_interrupt %d %d %VGv\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
329 do_interrupt(env->exception_index,
330 env->exception_is_int,
331 env->error_code,
332 env->exception_next_eip, 0);
333 /* successfully delivered */
334 env->old_exception = -1;
335 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
336 }
337 env->exception_index = -1;
338 }
339
340 next_tb = 0; /* force lookup of first TB */
341 for(;;)
342 {
343 interrupt_request = env->interrupt_request;
344#ifndef VBOX
345 if (__builtin_expect(interrupt_request, 0))
346#else
347 if (RT_UNLIKELY(interrupt_request != 0))
348#endif
349 {
350                     /** @todo: reconcile with what QEMU really does */
351
352                     /* Single instruction exec request: we execute it and return (one way or the other).
353 The caller will always reschedule after doing this operation! */
354 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
355 {
356 /* not in flight are we? (if we are, we trapped) */
357 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
358 {
359 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
360 env->exception_index = EXCP_SINGLE_INSTR;
361 if (emulate_single_instr(env) == -1)
362 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%VGv!!\n", env->eip));
363
364 /* When we receive an external interrupt during execution of this single
365                            instruction, we should stay here. We will leave when we're ready
366 for raw-mode or when interrupted by pending EMT requests. */
367 interrupt_request = env->interrupt_request; /* reload this! */
368 if ( !(interrupt_request & CPU_INTERRUPT_HARD)
369 || !(env->eflags & IF_MASK)
370 || (env->hflags & HF_INHIBIT_IRQ_MASK)
371 || (env->state & CPU_RAW_HWACC)
372 )
373 {
374 env->exception_index = ret = EXCP_SINGLE_INSTR;
375 cpu_loop_exit();
376 }
377 }
378 /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
379 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
380 }
381
382 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
383 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
384 !(env->hflags & HF_SMM_MASK)) {
385 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
386 do_smm_enter();
387 next_tb = 0;
388 }
389 else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
390 (env->eflags & IF_MASK) &&
391 !(env->hflags & HF_INHIBIT_IRQ_MASK))
392 {
393 /* if hardware interrupt pending, we execute it */
394 int intno;
395 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
396 intno = cpu_get_pic_interrupt(env);
397 if (intno >= 0)
398 {
399 Log(("do_interrupt %d\n", intno));
400 do_interrupt(intno, 0, 0, 0, 1);
401 }
402 /* ensure that no TB jump will be modified as
403 the program flow was changed */
404 next_tb = 0;
405 }
406 if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
407 {
408 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
409 /* ensure that no TB jump will be modified as
410 the program flow was changed */
411 next_tb = 0;
412 }
413 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
414 if (interrupt_request & CPU_INTERRUPT_EXIT)
415 {
416 env->exception_index = EXCP_INTERRUPT;
417 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
418 ret = env->exception_index;
419 cpu_loop_exit();
420 }
421 if (interrupt_request & CPU_INTERRUPT_RC)
422 {
423 env->exception_index = EXCP_RC;
424 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
425 ret = env->exception_index;
426 cpu_loop_exit();
427 }
428 }
429
430 /*
431              * Check if the CPU state allows us to execute the code in raw-mode.
432 */
433 RAWEx_ProfileStart(env, STATS_RAW_CHECK);
434 if (remR3CanExecuteRaw(env,
435 env->eip + env->segs[R_CS].base,
436 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
437 &env->exception_index))
438 {
439 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
440 ret = env->exception_index;
441 cpu_loop_exit();
442 }
443 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
444
445 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
446 spin_lock(&tb_lock);
447 tb = tb_find_fast();
448 /* Note: we do it here to avoid a gcc bug on Mac OS X when
449 doing it in tb_find_slow */
450 if (tb_invalidated_flag) {
451 /* as some TB could have been invalidated because
452 of memory exceptions while generating the code, we
453 must recompute the hash index here */
454 next_tb = 0;
455 tb_invalidated_flag = 0;
456 }
457
458 /* see if we can patch the calling TB. When the TB
459 spans two pages, we cannot safely do a direct
460 jump. */
461 if (next_tb != 0
462 && !(tb->cflags & CF_RAW_MODE)
463 && tb->page_addr[1] == -1)
464 {
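                    /* Chain the previous TB directly to this one so the
                       next pass skips the hash lookup entirely; the low
                       two bits of next_tb select which of the previous
                       block's two jump slots gets patched. */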
465 tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
466 }
467 spin_unlock(&tb_lock);
468 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
469
470 env->current_tb = tb;
471 while (env->current_tb) {
472 tc_ptr = tb->tc_ptr;
473 /* execute the generated code */
474 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
475 next_tb = tcg_qemu_tb_exec(tc_ptr);
476 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
477 env->current_tb = NULL;
478 if ((next_tb & 3) == 2) {
479 /* Instruction counter expired. */
480 int insns_left;
481 tb = (TranslationBlock *)(long)(next_tb & ~3);
482 /* Restore PC. */
483 CPU_PC_FROM_TB(env, tb);
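                        /* icount_decr is a union of a 32-bit value and two
                           16-bit halves; asynchronous events can force the
                           high half to -1, making the count read negative
                           here and requesting an early exit. */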
484 insns_left = env->icount_decr.u32;
485 if (env->icount_extra && insns_left >= 0) {
486 /* Refill decrementer and continue execution. */
487 env->icount_extra += insns_left;
488 if (env->icount_extra > 0xffff) {
489 insns_left = 0xffff;
490 } else {
491 insns_left = env->icount_extra;
492 }
493 env->icount_extra -= insns_left;
494 env->icount_decr.u16.low = insns_left;
495 } else {
496 if (insns_left > 0) {
497 /* Execute remaining instructions. */
498 cpu_exec_nocache(insns_left, tb);
499 }
500 env->exception_index = EXCP_INTERRUPT;
501 next_tb = 0;
502 cpu_loop_exit();
503 }
504 }
505 }
506
507 /* reset soft MMU for next block (it can currently
508 only be set by a memory fault) */
509#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
510 if (env->hflags & HF_SOFTMMU_MASK) {
511 env->hflags &= ~HF_SOFTMMU_MASK;
512 /* do not allow linking to another block */
513 next_tb = 0;
514 }
515#endif
516 } /* for(;;) */
517 } else {
518 env_to_regs();
519 }
520#ifdef VBOX_HIGH_RES_TIMERS_HACK
521 /* NULL the current_tb here so cpu_interrupt() doesn't do
522        anything unnecessary (like crashing during single-instruction emulation). */
523 env->current_tb = NULL;
524 TMTimerPoll(env1->pVM);
525#endif
526 } /* for(;;) */
527
528#if defined(TARGET_I386)
529 /* restore flags in standard format */
530 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
531#else
532#error unsupported target CPU
533#endif
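/* Third inclusion of hostregs_helper.h: with neither DECLARE_HOST_REGS
   nor SAVE_HOST_REGS defined, it expands to the code restoring the host
   global registers saved on entry. */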
534#include "hostregs_helper.h"
535 return ret;
536}
537
538#else /* !VBOX */
539int cpu_exec(CPUState *env1)
540{
541#define DECLARE_HOST_REGS 1
542#include "hostregs_helper.h"
543 int ret, interrupt_request;
544 TranslationBlock *tb;
545 uint8_t *tc_ptr;
546 unsigned long next_tb;
547
548 if (cpu_halted(env1) == EXCP_HALTED)
549 return EXCP_HALTED;
550
551 cpu_single_env = env1;
552
553 /* first we save global registers */
554#define SAVE_HOST_REGS 1
555#include "hostregs_helper.h"
556 env = env1;
557
558 env_to_regs();
559#if defined(TARGET_I386)
560 /* put eflags in CPU temporary format */
561 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
562 DF = 1 - (2 * ((env->eflags >> 10) & 1));
563 CC_OP = CC_OP_EFLAGS;
564 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
565#elif defined(TARGET_SPARC)
566#elif defined(TARGET_M68K)
567 env->cc_op = CC_OP_FLAGS;
568 env->cc_dest = env->sr & 0xf;
569 env->cc_x = (env->sr >> 4) & 1;
570#elif defined(TARGET_ALPHA)
571#elif defined(TARGET_ARM)
572#elif defined(TARGET_PPC)
573#elif defined(TARGET_MIPS)
574#elif defined(TARGET_SH4)
575#elif defined(TARGET_CRIS)
576 /* XXXXX */
577#else
578#error unsupported target CPU
579#endif
580 env->exception_index = -1;
581
582 /* prepare setjmp context for exception handling */
583 for(;;) {
584 if (setjmp(env->jmp_env) == 0) {
585 env->current_tb = NULL;
586 /* if an exception is pending, we execute it here */
587 if (env->exception_index >= 0) {
588 if (env->exception_index >= EXCP_INTERRUPT) {
589 /* exit request from the cpu execution loop */
590 ret = env->exception_index;
591 break;
592 } else if (env->user_mode_only) {
593 /* if user mode only, we simulate a fake exception
594 which will be handled outside the cpu execution
595 loop */
596#if defined(TARGET_I386)
597 do_interrupt_user(env->exception_index,
598 env->exception_is_int,
599 env->error_code,
600 env->exception_next_eip);
601 /* successfully delivered */
602 env->old_exception = -1;
603#endif
604 ret = env->exception_index;
605 break;
606 } else {
607#if defined(TARGET_I386)
608 /* simulate a real cpu exception. On i386, it can
609 trigger new exceptions, but we do not handle
610 double or triple faults yet. */
611 do_interrupt(env->exception_index,
612 env->exception_is_int,
613 env->error_code,
614 env->exception_next_eip, 0);
615 /* successfully delivered */
616 env->old_exception = -1;
617#elif defined(TARGET_PPC)
618 do_interrupt(env);
619#elif defined(TARGET_MIPS)
620 do_interrupt(env);
621#elif defined(TARGET_SPARC)
622 do_interrupt(env);
623#elif defined(TARGET_ARM)
624 do_interrupt(env);
625#elif defined(TARGET_SH4)
626 do_interrupt(env);
627#elif defined(TARGET_ALPHA)
628 do_interrupt(env);
629#elif defined(TARGET_CRIS)
630 do_interrupt(env);
631#elif defined(TARGET_M68K)
632 do_interrupt(0);
633#endif
634 }
635 env->exception_index = -1;
636 }
637#ifdef USE_KQEMU
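            /* kqemu is QEMU's optional kernel-mode accelerator: when the
               guest state qualifies, execution is handed over to it, so
               the lazily tracked EFLAGS must be materialized before the
               call and split back into CC_SRC/CC_OP afterwards. */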
638 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
639 int ret;
640 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
641 ret = kqemu_cpu_exec(env);
642 /* put eflags in CPU temporary format */
643 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
644 DF = 1 - (2 * ((env->eflags >> 10) & 1));
645 CC_OP = CC_OP_EFLAGS;
646 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
647 if (ret == 1) {
648 /* exception */
649 longjmp(env->jmp_env, 1);
650 } else if (ret == 2) {
651 /* softmmu execution needed */
652 } else {
653 if (env->interrupt_request != 0) {
654 /* hardware interrupt will be executed just after */
655 } else {
656 /* otherwise, we restart */
657 longjmp(env->jmp_env, 1);
658 }
659 }
660 }
661#endif
662
663 next_tb = 0; /* force lookup of first TB */
664 for(;;) {
665 interrupt_request = env->interrupt_request;
666 if (unlikely(interrupt_request) &&
667 likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
668 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
669 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
670 env->exception_index = EXCP_DEBUG;
671 cpu_loop_exit();
672 }
673#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
674 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
675 if (interrupt_request & CPU_INTERRUPT_HALT) {
676 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
677 env->halted = 1;
678 env->exception_index = EXCP_HLT;
679 cpu_loop_exit();
680 }
681#endif
682#if defined(TARGET_I386)
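                /* HF2_GIF_MASK caches AMD SVM's Global Interrupt Flag;
                   while GIF is clear the guest accepts no SMIs, NMIs or
                   external interrupts at all. */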
683 if (env->hflags2 & HF2_GIF_MASK) {
684 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
685 !(env->hflags & HF_SMM_MASK)) {
686 svm_check_intercept(SVM_EXIT_SMI);
687 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
688 do_smm_enter();
689 next_tb = 0;
690 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
691 !(env->hflags2 & HF2_NMI_MASK)) {
692 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
693 env->hflags2 |= HF2_NMI_MASK;
694 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
695 next_tb = 0;
696 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
697 (((env->hflags2 & HF2_VINTR_MASK) &&
698 (env->hflags2 & HF2_HIF_MASK)) ||
699 (!(env->hflags2 & HF2_VINTR_MASK) &&
700 (env->eflags & IF_MASK &&
701 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
702 int intno;
703 svm_check_intercept(SVM_EXIT_INTR);
704 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
705 intno = cpu_get_pic_interrupt(env);
706 if (loglevel & CPU_LOG_TB_IN_ASM) {
707 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
708 }
709 do_interrupt(intno, 0, 0, 0, 1);
710 /* ensure that no TB jump will be modified as
711 the program flow was changed */
712 next_tb = 0;
713#if !defined(CONFIG_USER_ONLY)
714 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
715 (env->eflags & IF_MASK) &&
716 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
717 int intno;
718 /* FIXME: this should respect TPR */
719 svm_check_intercept(SVM_EXIT_VINTR);
720 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
721 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
722 if (loglevel & CPU_LOG_TB_IN_ASM)
723 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
724 do_interrupt(intno, 0, 0, 0, 1);
725 next_tb = 0;
726#endif
727 }
728 }
729#elif defined(TARGET_PPC)
730#if 0
731 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
732 cpu_ppc_reset(env);
733 }
734#endif
735 if (interrupt_request & CPU_INTERRUPT_HARD) {
736 ppc_hw_interrupt(env);
737 if (env->pending_interrupts == 0)
738 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
739 next_tb = 0;
740 }
741#elif defined(TARGET_MIPS)
742 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
743 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
744 (env->CP0_Status & (1 << CP0St_IE)) &&
745 !(env->CP0_Status & (1 << CP0St_EXL)) &&
746 !(env->CP0_Status & (1 << CP0St_ERL)) &&
747 !(env->hflags & MIPS_HFLAG_DM)) {
748 /* Raise it */
749 env->exception_index = EXCP_EXT_INTERRUPT;
750 env->error_code = 0;
751 do_interrupt(env);
752 next_tb = 0;
753 }
754#elif defined(TARGET_SPARC)
755 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
756 (env->psret != 0)) {
757 int pil = env->interrupt_index & 15;
758 int type = env->interrupt_index & 0xf0;
759
760 if (((type == TT_EXTINT) &&
761 (pil == 15 || pil > env->psrpil)) ||
762 type != TT_EXTINT) {
763 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
764 env->exception_index = env->interrupt_index;
765 do_interrupt(env);
766 env->interrupt_index = 0;
767#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
768 cpu_check_irqs(env);
769#endif
770 next_tb = 0;
771 }
772 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
773 //do_interrupt(0, 0, 0, 0, 0);
774 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
775 }
776#elif defined(TARGET_ARM)
777 if (interrupt_request & CPU_INTERRUPT_FIQ
778 && !(env->uncached_cpsr & CPSR_F)) {
779 env->exception_index = EXCP_FIQ;
780 do_interrupt(env);
781 next_tb = 0;
782 }
783 /* ARMv7-M interrupt return works by loading a magic value
784 into the PC. On real hardware the load causes the
785 return to occur. The qemu implementation performs the
786 jump normally, then does the exception return when the
787 CPU tries to execute code at the magic address.
788 This will cause the magic PC value to be pushed to
789                    the stack if an interrupt occurred at the wrong time.
790 We avoid this by disabling interrupts when
791 pc contains a magic address. */
792 if (interrupt_request & CPU_INTERRUPT_HARD
793 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
794 || !(env->uncached_cpsr & CPSR_I))) {
795 env->exception_index = EXCP_IRQ;
796 do_interrupt(env);
797 next_tb = 0;
798 }
799#elif defined(TARGET_SH4)
800 if (interrupt_request & CPU_INTERRUPT_HARD) {
801 do_interrupt(env);
802 next_tb = 0;
803 }
804#elif defined(TARGET_ALPHA)
805 if (interrupt_request & CPU_INTERRUPT_HARD) {
806 do_interrupt(env);
807 next_tb = 0;
808 }
809#elif defined(TARGET_CRIS)
810 if (interrupt_request & CPU_INTERRUPT_HARD
811 && (env->pregs[PR_CCS] & I_FLAG)) {
812 env->exception_index = EXCP_IRQ;
813 do_interrupt(env);
814 next_tb = 0;
815 }
816 if (interrupt_request & CPU_INTERRUPT_NMI
817 && (env->pregs[PR_CCS] & M_FLAG)) {
818 env->exception_index = EXCP_NMI;
819 do_interrupt(env);
820 next_tb = 0;
821 }
822#elif defined(TARGET_M68K)
823 if (interrupt_request & CPU_INTERRUPT_HARD
824 && ((env->sr & SR_I) >> SR_I_SHIFT)
825 < env->pending_level) {
826 /* Real hardware gets the interrupt vector via an
827 IACK cycle at this point. Current emulated
828 hardware doesn't rely on this, so we
829 provide/save the vector when the interrupt is
830 first signalled. */
831 env->exception_index = env->pending_vector;
832 do_interrupt(1);
833 next_tb = 0;
834 }
835#endif
836                /* Don't use the cached interrupt_request value,
837 do_interrupt may have updated the EXITTB flag. */
838 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
839 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
840 /* ensure that no TB jump will be modified as
841 the program flow was changed */
842 next_tb = 0;
843 }
844 if (interrupt_request & CPU_INTERRUPT_EXIT) {
845 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
846 env->exception_index = EXCP_INTERRUPT;
847 cpu_loop_exit();
848 }
849 }
850#ifdef DEBUG_EXEC
851 if ((loglevel & CPU_LOG_TB_CPU)) {
852 /* restore flags in standard format */
853 regs_to_env();
854#if defined(TARGET_I386)
855 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
856 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
857 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
858#elif defined(TARGET_ARM)
859 cpu_dump_state(env, logfile, fprintf, 0);
860#elif defined(TARGET_SPARC)
861 cpu_dump_state(env, logfile, fprintf, 0);
862#elif defined(TARGET_PPC)
863 cpu_dump_state(env, logfile, fprintf, 0);
864#elif defined(TARGET_M68K)
865 cpu_m68k_flush_flags(env, env->cc_op);
866 env->cc_op = CC_OP_FLAGS;
867 env->sr = (env->sr & 0xffe0)
868 | env->cc_dest | (env->cc_x << 4);
869 cpu_dump_state(env, logfile, fprintf, 0);
870#elif defined(TARGET_MIPS)
871 cpu_dump_state(env, logfile, fprintf, 0);
872#elif defined(TARGET_SH4)
873 cpu_dump_state(env, logfile, fprintf, 0);
874#elif defined(TARGET_ALPHA)
875 cpu_dump_state(env, logfile, fprintf, 0);
876#elif defined(TARGET_CRIS)
877 cpu_dump_state(env, logfile, fprintf, 0);
878#else
879#error unsupported target CPU
880#endif
881 }
882#endif
883 spin_lock(&tb_lock);
884 tb = tb_find_fast();
885 /* Note: we do it here to avoid a gcc bug on Mac OS X when
886 doing it in tb_find_slow */
887 if (tb_invalidated_flag) {
888 /* as some TB could have been invalidated because
889 of memory exceptions while generating the code, we
890 must recompute the hash index here */
891 next_tb = 0;
892 tb_invalidated_flag = 0;
893 }
894#ifdef DEBUG_EXEC
895 if ((loglevel & CPU_LOG_EXEC)) {
896 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
897 (long)tb->tc_ptr, tb->pc,
898 lookup_symbol(tb->pc));
899 }
900#endif
901 /* see if we can patch the calling TB. When the TB
902 spans two pages, we cannot safely do a direct
903 jump. */
904 {
905 if (next_tb != 0 &&
906#ifdef USE_KQEMU
907 (env->kqemu_enabled != 2) &&
908#endif
909 tb->page_addr[1] == -1) {
910 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
911 }
912 }
913 spin_unlock(&tb_lock);
914 env->current_tb = tb;
915 while (env->current_tb) {
916 tc_ptr = tb->tc_ptr;
917 /* execute the generated code */
918#if defined(__sparc__) && !defined(HOST_SOLARIS)
919#undef env
920 env = cpu_single_env;
921#define env cpu_single_env
922#endif
923 next_tb = tcg_qemu_tb_exec(tc_ptr);
924 env->current_tb = NULL;
925 if ((next_tb & 3) == 2) {
926 /* Instruction counter expired. */
927 int insns_left;
928 tb = (TranslationBlock *)(long)(next_tb & ~3);
929 /* Restore PC. */
930 CPU_PC_FROM_TB(env, tb);
931 insns_left = env->icount_decr.u32;
932 if (env->icount_extra && insns_left >= 0) {
933 /* Refill decrementer and continue execution. */
934 env->icount_extra += insns_left;
935 if (env->icount_extra > 0xffff) {
936 insns_left = 0xffff;
937 } else {
938 insns_left = env->icount_extra;
939 }
940 env->icount_extra -= insns_left;
941 env->icount_decr.u16.low = insns_left;
942 } else {
943 if (insns_left > 0) {
944 /* Execute remaining instructions. */
945 cpu_exec_nocache(insns_left, tb);
946 }
947 env->exception_index = EXCP_INTERRUPT;
948 next_tb = 0;
949 cpu_loop_exit();
950 }
951 }
952 }
953 /* reset soft MMU for next block (it can currently
954 only be set by a memory fault) */
955#if defined(USE_KQEMU)
956#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
957 if (kqemu_is_ok(env) &&
958 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
959 cpu_loop_exit();
960 }
961#endif
962 } /* for(;;) */
963 } else {
964 env_to_regs();
965 }
966 } /* for(;;) */
967
968
969#if defined(TARGET_I386)
970 /* restore flags in standard format */
971 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
972#elif defined(TARGET_ARM)
973    /* XXX: Save/restore host fpu exception state? */
974#elif defined(TARGET_SPARC)
975#elif defined(TARGET_PPC)
976#elif defined(TARGET_M68K)
977 cpu_m68k_flush_flags(env, env->cc_op);
978 env->cc_op = CC_OP_FLAGS;
979 env->sr = (env->sr & 0xffe0)
980 | env->cc_dest | (env->cc_x << 4);
981#elif defined(TARGET_MIPS)
982#elif defined(TARGET_SH4)
983#elif defined(TARGET_ALPHA)
984#elif defined(TARGET_CRIS)
985 /* XXXXX */
986#else
987#error unsupported target CPU
988#endif
989
990 /* restore global registers */
991#include "hostregs_helper.h"
992
993    /* fail safe: never use cpu_single_env outside cpu_exec() */
994 cpu_single_env = NULL;
995 return ret;
996}
997#endif /* !VBOX */
998
999/* must only be called from the generated code as an exception can be
1000 generated */
1001void tb_invalidate_page_range(target_ulong start, target_ulong end)
1002{
1003    /* XXX: cannot enable it yet because it leads to an MMU exception
1004 where NIP != read address on PowerPC */
1005#if 0
1006 target_ulong phys_addr;
1007 phys_addr = get_phys_addr_code(env, start);
1008 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
1009#endif
1010}
1011
1012#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
1013
1014void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
1015{
1016 CPUX86State *saved_env;
1017
1018 saved_env = env;
1019 env = s;
1020 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
1021 selector &= 0xffff;
1022 cpu_x86_load_seg_cache(env, seg_reg, selector,
1023 (selector << 4), 0xffff, 0);
1024 } else {
1025 load_seg(seg_reg, selector);
1026 }
1027 env = saved_env;
1028}
1029
1030void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
1031{
1032 CPUX86State *saved_env;
1033
1034 saved_env = env;
1035 env = s;
1036
1037 helper_fsave((target_ulong)ptr, data32);
1038
1039 env = saved_env;
1040}
1041
1042void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
1043{
1044 CPUX86State *saved_env;
1045
1046 saved_env = env;
1047 env = s;
1048
1049 helper_frstor((target_ulong)ptr, data32);
1050
1051 env = saved_env;
1052}
1053
1054#endif /* TARGET_I386 */
1055
1056#if !defined(CONFIG_SOFTMMU)
1057
1058#if defined(TARGET_I386)
1059
1060/* 'pc' is the host PC at which the exception was raised. 'address' is
1061 the effective address of the memory exception. 'is_write' is 1 if a
1062   write caused the exception and 0 otherwise. 'old_set' is the
1063 signal set which should be restored */
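/* Common return protocol for all handle_cpu_signal() variants below:
   1 means the fault was consumed (page unprotected, guest exception
   raised or execution resumed), 0 means a genuine host fault that the
   caller must not swallow. */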
1064static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1065 int is_write, sigset_t *old_set,
1066 void *puc)
1067{
1068 TranslationBlock *tb;
1069 int ret;
1070
1071 if (cpu_single_env)
1072 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1073#if defined(DEBUG_SIGNAL)
1074 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1075 pc, address, is_write, *(unsigned long *)old_set);
1076#endif
1077 /* XXX: locking issue */
1078 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1079 return 1;
1080 }
1081
1082 /* see if it is an MMU fault */
1083 ret = cpu_x86_handle_mmu_fault(env, address, is_write,
1084 ((env->hflags & HF_CPL_MASK) == 3), 0);
1085 if (ret < 0)
1086 return 0; /* not an MMU fault */
1087 if (ret == 0)
1088 return 1; /* the MMU fault was handled without causing real CPU fault */
1089 /* now we have a real cpu fault */
1090 tb = tb_find_pc(pc);
1091 if (tb) {
1092 /* the PC is inside the translated code. It means that we have
1093 a virtual CPU fault */
1094 cpu_restore_state(tb, env, pc, puc);
1095 }
1096 if (ret == 1) {
1097#if 0
1098 printf("PF exception: EIP=0x%VGv CR2=0x%VGv error=0x%x\n",
1099 env->eip, env->cr[2], env->error_code);
1100#endif
1101 /* we restore the process signal mask as the sigreturn should
1102 do it (XXX: use sigsetjmp) */
1103 sigprocmask(SIG_SETMASK, old_set, NULL);
1104 raise_exception_err(env->exception_index, env->error_code);
1105 } else {
1106 /* activate soft MMU for this block */
1107 env->hflags |= HF_SOFTMMU_MASK;
1108 cpu_resume_from_signal(env, puc);
1109 }
1110 /* never comes here */
1111 return 1;
1112}
1113
1114#elif defined(TARGET_ARM)
1115static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1116 int is_write, sigset_t *old_set,
1117 void *puc)
1118{
1119 TranslationBlock *tb;
1120 int ret;
1121
1122 if (cpu_single_env)
1123 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1124#if defined(DEBUG_SIGNAL)
1125 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1126 pc, address, is_write, *(unsigned long *)old_set);
1127#endif
1128 /* XXX: locking issue */
1129 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1130 return 1;
1131 }
1132 /* see if it is an MMU fault */
1133 ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
1134 if (ret < 0)
1135 return 0; /* not an MMU fault */
1136 if (ret == 0)
1137 return 1; /* the MMU fault was handled without causing real CPU fault */
1138 /* now we have a real cpu fault */
1139 tb = tb_find_pc(pc);
1140 if (tb) {
1141 /* the PC is inside the translated code. It means that we have
1142 a virtual CPU fault */
1143 cpu_restore_state(tb, env, pc, puc);
1144 }
1145 /* we restore the process signal mask as the sigreturn should
1146 do it (XXX: use sigsetjmp) */
1147 sigprocmask(SIG_SETMASK, old_set, NULL);
1148 cpu_loop_exit();
1149}
1150#elif defined(TARGET_SPARC)
1151static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1152 int is_write, sigset_t *old_set,
1153 void *puc)
1154{
1155 TranslationBlock *tb;
1156 int ret;
1157
1158 if (cpu_single_env)
1159 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1160#if defined(DEBUG_SIGNAL)
1161 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1162 pc, address, is_write, *(unsigned long *)old_set);
1163#endif
1164 /* XXX: locking issue */
1165 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1166 return 1;
1167 }
1168 /* see if it is an MMU fault */
1169 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
1170 if (ret < 0)
1171 return 0; /* not an MMU fault */
1172 if (ret == 0)
1173 return 1; /* the MMU fault was handled without causing real CPU fault */
1174 /* now we have a real cpu fault */
1175 tb = tb_find_pc(pc);
1176 if (tb) {
1177 /* the PC is inside the translated code. It means that we have
1178 a virtual CPU fault */
1179 cpu_restore_state(tb, env, pc, puc);
1180 }
1181 /* we restore the process signal mask as the sigreturn should
1182 do it (XXX: use sigsetjmp) */
1183 sigprocmask(SIG_SETMASK, old_set, NULL);
1184 cpu_loop_exit();
1185}
1186#elif defined (TARGET_PPC)
1187static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1188 int is_write, sigset_t *old_set,
1189 void *puc)
1190{
1191 TranslationBlock *tb;
1192 int ret;
1193
1194 if (cpu_single_env)
1195 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1196#if defined(DEBUG_SIGNAL)
1197 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1198 pc, address, is_write, *(unsigned long *)old_set);
1199#endif
1200 /* XXX: locking issue */
1201 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1202 return 1;
1203 }
1204
1205 /* see if it is an MMU fault */
1206 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
1207 if (ret < 0)
1208 return 0; /* not an MMU fault */
1209 if (ret == 0)
1210 return 1; /* the MMU fault was handled without causing real CPU fault */
1211
1212 /* now we have a real cpu fault */
1213 tb = tb_find_pc(pc);
1214 if (tb) {
1215 /* the PC is inside the translated code. It means that we have
1216 a virtual CPU fault */
1217 cpu_restore_state(tb, env, pc, puc);
1218 }
1219 if (ret == 1) {
1220#if 0
1221 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1222 env->nip, env->error_code, tb);
1223#endif
1224 /* we restore the process signal mask as the sigreturn should
1225 do it (XXX: use sigsetjmp) */
1226 sigprocmask(SIG_SETMASK, old_set, NULL);
1227 do_raise_exception_err(env->exception_index, env->error_code);
1228 } else {
1229 /* activate soft MMU for this block */
1230 cpu_resume_from_signal(env, puc);
1231 }
1232 /* never comes here */
1233 return 1;
1234}
1235
1236#elif defined(TARGET_M68K)
1237static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1238 int is_write, sigset_t *old_set,
1239 void *puc)
1240{
1241 TranslationBlock *tb;
1242 int ret;
1243
1244 if (cpu_single_env)
1245 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1246#if defined(DEBUG_SIGNAL)
1247 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1248 pc, address, is_write, *(unsigned long *)old_set);
1249#endif
1250 /* XXX: locking issue */
1251 if (is_write && page_unprotect(address, pc, puc)) {
1252 return 1;
1253 }
1254 /* see if it is an MMU fault */
1255 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
1256 if (ret < 0)
1257 return 0; /* not an MMU fault */
1258 if (ret == 0)
1259 return 1; /* the MMU fault was handled without causing real CPU fault */
1260 /* now we have a real cpu fault */
1261 tb = tb_find_pc(pc);
1262 if (tb) {
1263 /* the PC is inside the translated code. It means that we have
1264 a virtual CPU fault */
1265 cpu_restore_state(tb, env, pc, puc);
1266 }
1267 /* we restore the process signal mask as the sigreturn should
1268 do it (XXX: use sigsetjmp) */
1269 sigprocmask(SIG_SETMASK, old_set, NULL);
1270 cpu_loop_exit();
1271 /* never comes here */
1272 return 1;
1273}
1274
1275#elif defined (TARGET_MIPS)
1276static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1277 int is_write, sigset_t *old_set,
1278 void *puc)
1279{
1280 TranslationBlock *tb;
1281 int ret;
1282
1283 if (cpu_single_env)
1284 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1285#if defined(DEBUG_SIGNAL)
1286 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1287 pc, address, is_write, *(unsigned long *)old_set);
1288#endif
1289 /* XXX: locking issue */
1290 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1291 return 1;
1292 }
1293
1294 /* see if it is an MMU fault */
1295 ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
1296 if (ret < 0)
1297 return 0; /* not an MMU fault */
1298 if (ret == 0)
1299 return 1; /* the MMU fault was handled without causing real CPU fault */
1300
1301 /* now we have a real cpu fault */
1302 tb = tb_find_pc(pc);
1303 if (tb) {
1304 /* the PC is inside the translated code. It means that we have
1305 a virtual CPU fault */
1306 cpu_restore_state(tb, env, pc, puc);
1307 }
1308 if (ret == 1) {
1309#if 0
1310 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1311 env->nip, env->error_code, tb);
1312#endif
1313 /* we restore the process signal mask as the sigreturn should
1314 do it (XXX: use sigsetjmp) */
1315 sigprocmask(SIG_SETMASK, old_set, NULL);
1316 do_raise_exception_err(env->exception_index, env->error_code);
1317 } else {
1318 /* activate soft MMU for this block */
1319 cpu_resume_from_signal(env, puc);
1320 }
1321 /* never comes here */
1322 return 1;
1323}
1324
1325#elif defined (TARGET_SH4)
1326static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1327 int is_write, sigset_t *old_set,
1328 void *puc)
1329{
1330 TranslationBlock *tb;
1331 int ret;
1332
1333 if (cpu_single_env)
1334 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1335#if defined(DEBUG_SIGNAL)
1336 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1337 pc, address, is_write, *(unsigned long *)old_set);
1338#endif
1339 /* XXX: locking issue */
1340 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1341 return 1;
1342 }
1343
1344 /* see if it is an MMU fault */
1345 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
1346 if (ret < 0)
1347 return 0; /* not an MMU fault */
1348 if (ret == 0)
1349 return 1; /* the MMU fault was handled without causing real CPU fault */
1350
1351 /* now we have a real cpu fault */
1352 tb = tb_find_pc(pc);
1353 if (tb) {
1354 /* the PC is inside the translated code. It means that we have
1355 a virtual CPU fault */
1356 cpu_restore_state(tb, env, pc, puc);
1357 }
1358#if 0
1359 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1360 env->nip, env->error_code, tb);
1361#endif
1362 /* we restore the process signal mask as the sigreturn should
1363 do it (XXX: use sigsetjmp) */
1364 sigprocmask(SIG_SETMASK, old_set, NULL);
1365 cpu_loop_exit();
1366 /* never comes here */
1367 return 1;
1368}
1369#else
1370#error unsupported target CPU
1371#endif
1372
1373#if defined(__i386__)
1374
1375#if defined(__APPLE__)
1376# include <sys/ucontext.h>
1377
1378# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1379# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1380# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1381#else
1382# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1383# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1384# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1385#endif
1386
1387int cpu_signal_handler(int host_signum, void *pinfo,
1388 void *puc)
1389{
1390 siginfo_t *info = pinfo;
1391 struct ucontext *uc = puc;
1392 unsigned long pc;
1393 int trapno;
1394
1395#ifndef REG_EIP
1396/* for glibc 2.1 */
1397#define REG_EIP EIP
1398#define REG_ERR ERR
1399#define REG_TRAPNO TRAPNO
1400#endif
1401 pc = uc->uc_mcontext.gregs[REG_EIP];
1402 trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
1403#if defined(TARGET_I386) && defined(USE_CODE_COPY)
1404 if (trapno == 0x00 || trapno == 0x05) {
1405 /* send division by zero or bound exception */
1406 cpu_send_trap(pc, trapno, uc);
1407 return 1;
1408 } else
1409#endif
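    /* Trap 0xe is the x86 page fault; bit 1 of its error code is set
       for write accesses, hence the (REG_ERR >> 1) & 1 below. */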
1410 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1411 trapno == 0xe ?
1412 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1413 &uc->uc_sigmask, puc);
1414}
1415
1416#elif defined(__x86_64__)
1417
1418int cpu_signal_handler(int host_signum, void *pinfo,
1419 void *puc)
1420{
1421 siginfo_t *info = pinfo;
1422 struct ucontext *uc = puc;
1423 unsigned long pc;
1424
1425 pc = uc->uc_mcontext.gregs[REG_RIP];
1426 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1427 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1428 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1429 &uc->uc_sigmask, puc);
1430}
1431
1432#elif defined(__powerpc__)
1433
1434/***********************************************************************
1435 * signal context platform-specific definitions
1436 * From Wine
1437 */
1438#ifdef linux
1439/* All Registers access - only for local access */
1440# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1441/* Gpr Registers access */
1442# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1443# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1444# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1445# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1446# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1447# define LR_sig(context) REG_sig(link, context) /* Link register */
1448# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1449/* Float Registers access */
1450# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1451# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1452/* Exception Registers access */
1453# define DAR_sig(context) REG_sig(dar, context)
1454# define DSISR_sig(context) REG_sig(dsisr, context)
1455# define TRAP_sig(context) REG_sig(trap, context)
1456#endif /* linux */
1457
1458#ifdef __APPLE__
1459# include <sys/ucontext.h>
1460typedef struct ucontext SIGCONTEXT;
1461/* All Registers access - only for local access */
1462# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1463# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1464# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1465# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1466/* Gpr Registers access */
1467# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1468# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1469# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1470# define CTR_sig(context) REG_sig(ctr, context)
1471# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1472# define LR_sig(context) REG_sig(lr, context) /* Link register */
1473# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1474/* Float Registers access */
1475# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1476# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1477/* Exception Registers access */
1478# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1479# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1480# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1481#endif /* __APPLE__ */
1482
1483int cpu_signal_handler(int host_signum, void *pinfo,
1484 void *puc)
1485{
1486 siginfo_t *info = pinfo;
1487 struct ucontext *uc = puc;
1488 unsigned long pc;
1489 int is_write;
1490
1491 pc = IAR_sig(uc);
1492 is_write = 0;
1493#if 0
1494 /* ppc 4xx case */
1495 if (DSISR_sig(uc) & 0x00800000)
1496 is_write = 1;
1497#else
1498 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1499 is_write = 1;
1500#endif
1501 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1502 is_write, &uc->uc_sigmask, puc);
1503}
1504
1505#elif defined(__alpha__)
1506
1507int cpu_signal_handler(int host_signum, void *pinfo,
1508 void *puc)
1509{
1510 siginfo_t *info = pinfo;
1511 struct ucontext *uc = puc;
1512    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
1513 uint32_t insn = *pc;
1514 int is_write = 0;
1515
1516 /* XXX: need kernel patch to get write flag faster */
1517 switch (insn >> 26) {
1518 case 0x0d: // stw
1519 case 0x0e: // stb
1520 case 0x0f: // stq_u
1521 case 0x24: // stf
1522 case 0x25: // stg
1523 case 0x26: // sts
1524 case 0x27: // stt
1525 case 0x2c: // stl
1526 case 0x2d: // stq
1527 case 0x2e: // stl_c
1528 case 0x2f: // stq_c
1529 is_write = 1;
1530 }
1531
1532    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
1533 is_write, &uc->uc_sigmask, puc);
1534}
1535#elif defined(__sparc__)
1536
1537int cpu_signal_handler(int host_signum, void *pinfo,
1538 void *puc)
1539{
1540 siginfo_t *info = pinfo;
1541 uint32_t *regs = (uint32_t *)(info + 1);
1542 void *sigmask = (regs + 20);
1543 unsigned long pc;
1544 int is_write;
1545 uint32_t insn;
1546
1547    /* XXX: is there a standard glibc define? */
1548 pc = regs[1];
1549 /* XXX: need kernel patch to get write flag faster */
1550 is_write = 0;
1551 insn = *(uint32_t *)pc;
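    /* Bits 31:30 == 3 selects SPARC format-3 (load/store) instructions;
       the op3 opcode field extracted below lives in bits 24:19. */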
1552 if ((insn >> 30) == 3) {
1553 switch((insn >> 19) & 0x3f) {
1554 case 0x05: // stb
1555 case 0x06: // sth
1556 case 0x04: // st
1557 case 0x07: // std
1558 case 0x24: // stf
1559 case 0x27: // stdf
1560 case 0x25: // stfsr
1561 is_write = 1;
1562 break;
1563 }
1564 }
1565 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1566 is_write, sigmask, NULL);
1567}
1568
1569#elif defined(__arm__)
1570
1571int cpu_signal_handler(int host_signum, void *pinfo,
1572 void *puc)
1573{
1574 siginfo_t *info = pinfo;
1575 struct ucontext *uc = puc;
1576 unsigned long pc;
1577 int is_write;
1578
1579 pc = uc->uc_mcontext.gregs[R15];
1580 /* XXX: compute is_write */
1581 is_write = 0;
1582 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1583 is_write,
1584 &uc->uc_sigmask, puc);
1585}
1586
1587#elif defined(__mc68000)
1588
1589int cpu_signal_handler(int host_signum, void *pinfo,
1590 void *puc)
1591{
1592 siginfo_t *info = pinfo;
1593 struct ucontext *uc = puc;
1594 unsigned long pc;
1595 int is_write;
1596
1597 pc = uc->uc_mcontext.gregs[16];
1598 /* XXX: compute is_write */
1599 is_write = 0;
1600 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1601 is_write,
1602 &uc->uc_sigmask, puc);
1603}
1604
1605#elif defined(__ia64)
1606
1607#ifndef __ISR_VALID
1608 /* This ought to be in <bits/siginfo.h>... */
1609# define __ISR_VALID 1
1610#endif
1611
1612int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1613{
1614 siginfo_t *info = pinfo;
1615 struct ucontext *uc = puc;
1616 unsigned long ip;
1617 int is_write = 0;
1618
1619 ip = uc->uc_mcontext.sc_ip;
1620 switch (host_signum) {
1621 case SIGILL:
1622 case SIGFPE:
1623 case SIGSEGV:
1624 case SIGBUS:
1625 case SIGTRAP:
1626 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1627 /* ISR.W (write-access) is bit 33: */
1628 is_write = (info->si_isr >> 33) & 1;
1629 break;
1630
1631 default:
1632 break;
1633 }
1634 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1635 is_write,
1636 &uc->uc_sigmask, puc);
1637}
1638
1639#elif defined(__s390__)
1640
1641int cpu_signal_handler(int host_signum, void *pinfo,
1642 void *puc)
1643{
1644 siginfo_t *info = pinfo;
1645 struct ucontext *uc = puc;
1646 unsigned long pc;
1647 int is_write;
1648
1649 pc = uc->uc_mcontext.psw.addr;
1650 /* XXX: compute is_write */
1651 is_write = 0;
1652 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1653 is_write,
1654 &uc->uc_sigmask, puc);
1655}
1656
1657#else
1658
1659#error host CPU specific signal handler needed
1660
1661#endif
1662
1663#endif /* !defined(CONFIG_SOFTMMU) */