VirtualBox

source: vbox/trunk/src/recompiler/exec.c@ 69564

Last change on this file since 69564 was 69465, checked in by vboxsync, 7 years ago

recompiler: scm updates

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 133.0 KB
Line 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51#endif /* VBOX */
52
53#include "cpu.h"
54#include "exec-all.h"
55#include "qemu-common.h"
56#include "tcg.h"
57#ifndef VBOX
58#include "hw/hw.h"
59#include "hw/qdev.h"
60#endif /* !VBOX */
61#include "osdep.h"
62#include "kvm.h"
63#include "qemu-timer.h"
64#if defined(CONFIG_USER_ONLY)
65#include <qemu.h>
66#include <signal.h>
67#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
68#include <sys/param.h>
69#if __FreeBSD_version >= 700104
70#define HAVE_KINFO_GETVMMAP
71#define sigqueue sigqueue_freebsd /* avoid redefinition */
72#include <sys/time.h>
73#include <sys/proc.h>
74#include <machine/profile.h>
75#define _KERNEL
76#include <sys/user.h>
77#undef _KERNEL
78#undef sigqueue
79#include <libutil.h>
80#endif
81#endif
82#endif
83
84//#define DEBUG_TB_INVALIDATE
85//#define DEBUG_FLUSH
86//#define DEBUG_TLB
87//#define DEBUG_UNASSIGNED
88
89/* make various TB consistency checks */
90//#define DEBUG_TB_CHECK
91//#define DEBUG_TLB_CHECK
92
93//#define DEBUG_IOPORT
94//#define DEBUG_SUBPAGE
95
96#if !defined(CONFIG_USER_ONLY)
97/* TB consistency checks only implemented for usermode emulation. */
98#undef DEBUG_TB_CHECK
99#endif
100
101#define SMC_BITMAP_USE_THRESHOLD 10
102
103static TranslationBlock *tbs;
104static int code_gen_max_blocks;
105TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
106static int nb_tbs;
107/* any access to the tbs or the page table must use this lock */
108spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
109
110#ifndef VBOX
111#if defined(__arm__) || defined(__sparc_v9__)
112/* The prologue must be reachable with a direct jump. ARM and Sparc64
113 have limited branch ranges (possibly also PPC) so place it in a
114 section close to code segment. */
115#define code_gen_section \
116 __attribute__((__section__(".gen_code"))) \
117 __attribute__((aligned (32)))
118#elif defined(_WIN32)
119/* Maximum alignment for Win32 is 16. */
120#define code_gen_section \
121 __attribute__((aligned (16)))
122#else
123#define code_gen_section \
124 __attribute__((aligned (32)))
125#endif
126
127uint8_t code_gen_prologue[1024] code_gen_section;
128#else /* VBOX */
129extern uint8_t *code_gen_prologue;
130#endif /* VBOX */
131static uint8_t *code_gen_buffer;
132static size_t code_gen_buffer_size;
133/* threshold to flush the translated code buffer */
134static size_t code_gen_buffer_max_size;
135static uint8_t *code_gen_ptr;
136
137#if !defined(CONFIG_USER_ONLY)
138# ifndef VBOX
139int phys_ram_fd;
140static int in_migration;
141# endif /* !VBOX */
142
143RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
144#endif
145
146CPUState *first_cpu;
147/* current CPU in the current thread. It is only valid inside
148 cpu_exec() */
149CPUState *cpu_single_env;
150/* 0 = Do not count executed instructions.
151 1 = Precise instruction counting.
152 2 = Adaptive rate instruction counting. */
153int use_icount = 0;
154/* Current instruction counter. While executing translated code this may
155 include some instructions that have not yet been executed. */
156int64_t qemu_icount;
157
158typedef struct PageDesc {
159 /* list of TBs intersecting this ram page */
160 TranslationBlock *first_tb;
161 /* in order to optimize self modifying code, we count the number
162 of lookups we do to a given page to use a bitmap */
163 unsigned int code_write_count;
164 uint8_t *code_bitmap;
165#if defined(CONFIG_USER_ONLY)
166 unsigned long flags;
167#endif
168} PageDesc;
169
170/* In system mode we want L1_MAP to be based on ram offsets,
171 while in user mode we want it to be based on virtual addresses. */
172#if !defined(CONFIG_USER_ONLY)
173#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
174# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
175#else
176# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
177#endif
178#else
179# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
180#endif
181
182/* Size of the L2 (and L3, etc) page tables. */
183#define L2_BITS 10
184#define L2_SIZE (1 << L2_BITS)
185
186/* The bits remaining after N lower levels of page tables. */
187#define P_L1_BITS_REM \
188 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
189#define V_L1_BITS_REM \
190 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
191
192/* Size of the L1 page table. Avoid silly small sizes. */
193#if P_L1_BITS_REM < 4
194#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
195#else
196#define P_L1_BITS P_L1_BITS_REM
197#endif
198
199#if V_L1_BITS_REM < 4
200#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
201#else
202#define V_L1_BITS V_L1_BITS_REM
203#endif
204
205#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
206#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
207
208#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
209#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
210
211size_t qemu_real_host_page_size;
212size_t qemu_host_page_bits;
213size_t qemu_host_page_size;
214uintptr_t qemu_host_page_mask;
215
216/* This is a multi-level map on the virtual address space.
217 The bottom level has pointers to PageDesc. */
218static void *l1_map[V_L1_SIZE];
219
220#if !defined(CONFIG_USER_ONLY)
221typedef struct PhysPageDesc {
222 /* offset in host memory of the page + io_index in the low bits */
223 ram_addr_t phys_offset;
224 ram_addr_t region_offset;
225} PhysPageDesc;
226
227/* This is a multi-level map on the physical address space.
228 The bottom level has pointers to PhysPageDesc. */
229static void *l1_phys_map[P_L1_SIZE];
230
231static void io_mem_init(void);
232
233/* io memory support */
234CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
235CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
236void *io_mem_opaque[IO_MEM_NB_ENTRIES];
237static char io_mem_used[IO_MEM_NB_ENTRIES];
238static int io_mem_watch;
239#endif
240
241#ifndef VBOX
242/* log support */
243#ifdef WIN32
244static const char *logfilename = "qemu.log";
245#else
246static const char *logfilename = "/tmp/qemu.log";
247#endif
248#endif /* !VBOX */
249FILE *logfile;
250int loglevel;
251#ifndef VBOX
252static int log_append = 0;
253#endif /* !VBOX */
254
255/* statistics */
256#ifndef VBOX
257#if !defined(CONFIG_USER_ONLY)
258static int tlb_flush_count;
259#endif
260static int tb_flush_count;
261static int tb_phys_invalidate_count;
262#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
263uint32_t tlb_flush_count;
264uint32_t tb_flush_count;
265uint32_t tb_phys_invalidate_count;
266#endif /* VBOX */
267
#ifndef VBOX
#ifdef _WIN32
/* Make [addr, addr+size) readable, writable and executable.
   Win32 variant: VirtualProtect handles page rounding itself.
   NB: the VirtualProtect return value is not checked. */
static void map_exec(void *addr, size_t size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
/* POSIX variant: round the range out to host page boundaries and
   mprotect() it RWX.  Errors are ignored. */
static void map_exec(void *addr, size_t size)
{
    uintptr_t start, end, page_size;

    page_size = getpagesize();
    start = (uintptr_t)addr;
    start &= ~(page_size - 1);          /* round start down to a page */

    end = (uintptr_t)addr + size;
    end += page_size - 1;               /* round end up to a page */
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
#else /* VBOX */
/* VBox variant: delegate page rounding and protection changes to IPRT. */
static void map_exec(void *addr, size_t size)
{
    RTMemProtect(addr, size,
                 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
}
#endif /* VBOX */
301
/* One-time page-size setup: determine the real host page size, derive
   qemu_host_page_size/bits/mask from it, and (in the non-VBox BSD
   user-mode build) mark host mappings as PAGE_RESERVED. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef VBOX
    /* VBox: the codegen buffer already exists at this point; make it RWX
       and take the page size from IPRT's PAGE_SIZE. */
    RTMemProtect(code_gen_buffer, code_gen_buffer_size,
                 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    qemu_real_host_page_size = PAGE_SIZE;
#else /* !VBOX */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
#endif /* !VBOX */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* compute log2 of the (power of two) host page size */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < VBOX_ONLY((int))qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#ifndef VBOX /* We use other means to set reserved bit on our pages */
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* enumerate existing host mappings via kinfo_getvmmap() and flag
           the corresponding guest ranges as reserved */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                uintptr_t startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        /* end lies outside the guest range: reserve to top */
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* fallback: parse the Linux-compat maps file for mapped ranges */
        FILE *f;

        last_brk = (uintptr_t)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                uintptr_t startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
#endif /* !VBOX */
}
398
/* Look up (and, when 'alloc' is non-zero, create on demand) the PageDesc
   for page 'index' in the multi-level l1_map.  Returns NULL when the
   entry is absent and allocation was not requested. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1: walk intermediate directory tables, allocating
       missing levels when requested.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: a leaf array of PageDesc entries.  */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
448
/* Non-allocating lookup; returns NULL if no PageDesc exists yet. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
453
454#if !defined(CONFIG_USER_ONLY)
455static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
456{
457 PhysPageDesc *pd;
458 void **lp;
459 int i;
460
461 /* Level 1. Always allocated. */
462 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
463
464 /* Level 2..N-1. */
465 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
466 void **p = *lp;
467 if (p == NULL) {
468 if (!alloc) {
469 return NULL;
470 }
471 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
472 }
473 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
474 }
475
476 pd = *lp;
477 if (pd == NULL) {
478 int i;
479
480 if (!alloc) {
481 return NULL;
482 }
483
484 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
485
486 for (i = 0; i < L2_SIZE; i++) {
487 pd[i].phys_offset = IO_MEM_UNASSIGNED;
488 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
489 }
490 }
491
492 return pd + (index & (L2_SIZE - 1));
493}
494
/* Non-allocating lookup; returns NULL if the physical page has no
   descriptor yet. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
499
500static void tlb_protect_code(ram_addr_t ram_addr);
501static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
502 target_ulong vaddr);
503#define mmap_lock() do { } while(0)
504#define mmap_unlock() do { } while(0)
505#endif
506
#ifdef VBOX /* We don't need such a huge codegen buffer size, as we execute
               most of the code in raw or hm mode. */
509#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
510#else /* !VBOX */
511#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
512#endif /* !VBOX */
513
514#if defined(CONFIG_USER_ONLY)
515/* Currently it is not recommended to allocate big chunks of data in
516 user mode. It will change when a dedicated libc will be used */
517#define USE_STATIC_CODE_GEN_BUFFER
518#endif
519
520#if defined(VBOX) && defined(USE_STATIC_CODE_GEN_BUFFER)
521# error "VBox allocates codegen buffer dynamically"
522#endif
523
524#ifdef USE_STATIC_CODE_GEN_BUFFER
525static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
526 __attribute__((aligned (CODE_GEN_ALIGN)));
527#endif
528
/* Allocate the translation-code buffer (static, IPRT, mmap or malloc
   depending on build/OS/arch), make it and the prologue executable, and
   size the TB array accordingly.  'tb_size' is a caller-requested buffer
   size in bytes; 0 selects a default. */
static void code_gen_alloc(uintptr_t tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
# ifdef VBOX
    /* We cannot use phys_ram_size here, as it's 0 now,
     * it only gets initialized once RAM registration callback
     * (REMR3NotifyPhysRamRegister()) called.
     */
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
# else  /* !VBOX */
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (uintptr_t)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
# endif /* !VBOX */
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
# ifdef VBOX
    code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);

    if (!code_gen_buffer) {
        LogRel(("REM: failed allocate codegen buffer %lld\n",
                code_gen_buffer_size));
        return;
    }
# else /* !VBOX */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* keep the buffer in the low 4GB so generated jumps reach it */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* generic fallback: plain allocation plus an explicit RWX remap */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
# endif /* !VBOX */
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
#ifndef VBOX /** @todo r=bird: why are we different? */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
#else
    /* VBox declares code_gen_prologue as a pointer, so sizeof() would be
       wrong here; the prologue buffer is 1KB (see the declaration above) */
    map_exec(code_gen_prologue, _1K);
#endif
    /* leave headroom so a single maximal TB never overruns the buffer */
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
644
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(uintptr_t tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;     /* start emitting at the buffer base */
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}
663
664#ifndef VBOX
665#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
666
/* Fix up common CPU state after an incoming loadvm/migration. */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    /* the TLB is not part of the saved state; start from a clean one */
    tlb_flush(env, 1);

    return 0;
}
678
/* savevm/migration descriptor for the architecture-independent part of
   the CPU state (halted flag and pending interrupt mask). */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
691#endif
692
693CPUState *qemu_get_cpu(int cpu)
694{
695 CPUState *env = first_cpu;
696
697 while (env) {
698 if (env->cpu_index == cpu)
699 break;
700 env = env->next_cpu;
701 }
702
703 return env;
704}
705
706#endif /* !VBOX */
707
/* Register a new virtual CPU: append 'env' to the global singly linked
   CPU list, assign it the next free cpu_index, and (non-VBox system
   emulation) hook it into savevm/migration. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    /* walk to the tail of the list, counting the CPUs already present */
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#ifndef VBOX
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
#endif /* !VBOX */
}
739
740static inline void invalidate_page_bitmap(PageDesc *p)
741{
742 if (p->code_bitmap) {
743 qemu_free(p->code_bitmap);
744 p->code_bitmap = NULL;
745 }
746 p->code_write_count = 0;
747}
748
749/* Set to NULL all the 'first_tb' fields in all PageDescs. */
750
751static void page_flush_tb_1 (int level, void **lp)
752{
753 int i;
754
755 if (*lp == NULL) {
756 return;
757 }
758 if (level == 0) {
759 PageDesc *pd = *lp;
760 for (i = 0; i < L2_SIZE; ++i) {
761 pd[i].first_tb = NULL;
762 invalidate_page_bitmap(pd + i);
763 }
764 } else {
765 void **pp = *lp;
766 for (i = 0; i < L2_SIZE; ++i) {
767 page_flush_tb_1 (level - 1, pp + i);
768 }
769 }
770}
771
772static void page_flush_tb(void)
773{
774 int i;
775 for (i = 0; i < V_L1_SIZE; i++) {
776 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
777 }
778}
779
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#ifdef VBOX
    STAM_PROFILE_START(&env1->StatTbFlush, a);
#endif
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* sanity check: the emit pointer must still be inside the buffer */
    if ((uintptr_t)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* drop every CPU's virtual-pc lookup cache */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* drop the physical-pc hash and the per-page TB lists */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* reuse the code buffer from the start */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
#ifdef VBOX
    STAM_PROFILE_STOP(&env1->StatTbFlush, a);
#endif
}
814
815#ifdef DEBUG_TB_CHECK
816
/* Debug check: report any TB still in the physical hash that overlaps
   the (supposedly invalidated) target page containing 'address'. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* overlap test: TB range [pc, pc+size) vs the target page */
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
833
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* check both the first and last page the TB touches */
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            /* pages holding translated code must not be writable, or
               self-modifying code could go undetected */
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
851
852#endif
853
854/* invalidate one TB */
855static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
856 int next_offset)
857{
858 TranslationBlock *tb1;
859 for(;;) {
860 tb1 = *ptb;
861 if (tb1 == tb) {
862 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
863 break;
864 }
865 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
866 }
867}
868
869static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
870{
871 TranslationBlock *tb1;
872 unsigned int n1;
873
874 for(;;) {
875 tb1 = *ptb;
876 n1 = (intptr_t)tb1 & 3;
877 tb1 = (TranslationBlock *)((intptr_t)tb1 & ~3);
878 if (tb1 == tb) {
879 *ptb = tb1->page_next[n1];
880 break;
881 }
882 ptb = &tb1->page_next[n1];
883 }
884}
885
/* Unlink jump slot 'n' of 'tb' from the circular list of TBs chained to
   the same jump target.  List pointers carry the slot number in their
   low 2 bits; tag value 2 marks the owning TB's jmp_first entry. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (intptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((intptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the list head: continue through jmp_first */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
913
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* retarget the patched jump back into this TB's own generated code
       (the spot right after the jump), undoing direct block chaining */
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
920
/* Remove 'tb' from every structure that references it: the physical-pc
   hash, the per-page TB lists, each CPU's jump cache, and the direct
   block-chaining lists.  'page_addr' is the page being invalidated by
   the caller (its own list is handled there), or -1. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* a TB may span a second page; unlink it there too */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (intptr_t)tb1 & 3;
        if (n1 == 2)    /* tag 2 marks the end of the circular list */
            break;
        tb1 = (TranslationBlock *)((intptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        /* make tb1 fall back to its own code instead of jumping here */
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
976
977#ifdef VBOX
978
/* VBox: invalidate the translated code covering guest virtual address
   'eip'.  Currently implemented as a full translation cache flush; the
   targeted single-TB variant in the #else branch is disabled. */
void tb_invalidate_virt(CPUState *env, uint32_t eip)
{
# if 1
    tb_flush(env);
# else
    /* disabled: targeted invalidation of just the TB at cs_base+eip */
    uint8_t *cs_base, *pc;
    unsigned int flags, h, phys_pc;
    TranslationBlock *tb, **ptb;

    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + eip;

    tb = tb_find(&ptb, (uintptr_t)pc, (uintptr_t)cs_base,
                 flags);

    if(tb)
    {
# ifdef DEBUG
        printf("invalidating TB (%08X) at %08X\n", tb, eip);
# endif
        tb_invalidate(tb);
        //Note: this will leak TBs, but the whole cache will be flushed
        //      when it happens too often
        tb->pc = 0;
        tb->cs_base = 0;
        tb->flags = 0;
    }
# endif
}
1010
1011# ifdef VBOX_STRICT
1012/**
1013 * Gets the page offset.
1014 */
1015ram_addr_t get_phys_page_offset(target_ulong addr)
1016{
1017 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
1018 return p ? p->phys_offset : 0;
1019}
1020# endif /* VBOX_STRICT */
1021
1022#endif /* VBOX */
1023
/* Set bits [start, start+len) in the bit array 'tab' (bit i lives in
   byte i/8, at position i%8).  Bits outside the range are untouched. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* the whole range lies inside a single byte */
        if (start < end) {
            *p |= head_mask & ~(0xff << (end & 7));
        }
    } else {
        /* partial first byte */
        *p++ |= head_mask;
        /* full bytes in the middle */
        for (start = (start + 8) & ~7; start < (end & ~7); start += 8) {
            *p++ = 0xff;
        }
        /* partial last byte, if any */
        if (start < end) {
            *p |= ~(0xff << (end & 7));
        }
    }
}
1050
/* Build the self-modifying-code bitmap for page 'p': one bit per byte
   of the page, set wherever translated code originates, by walking the
   page's TB list. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* the low 2 bits of the list pointer select which of the TB's
           pages this list entry belongs to */
        n = (intptr_t)tb & 3;
        tb = (TranslationBlock *)((intptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of the TB: code starts at the page's beginning */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
1078
/* Translate the guest code at pc/cs_base/flags into a new TB, emit it
   into the code buffer, and link it into the physical page tables.
   'cflags' are the compile flags.  May trigger a full tb_flush() when
   the TB pool is exhausted. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the emit pointer, keeping CODE_GEN_ALIGN alignment */
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills into a second page; record that page too */
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
1116
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* the low 2 bits of the list pointer encode which of the TB's
           two page slots this list belongs to */
        n = (intptr_t)tb & 3;
        tb = (TranslationBlock *)((intptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        /* invalidate only if [tb_start, tb_end) overlaps [start, end) */
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1227
/* len must be <= 8 and start must be a multiple of len.
   Fast path for small writes: when the page has a code bitmap, only
   invalidate if one of the bits covering [start, start+len) marks
   translated code; without a bitmap, always invalidate. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        /* test the 'len' bits starting at the write offset */
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1254
#if !defined(CONFIG_SOFTMMU)
/* User-mode variant: invalidate every TB on page 'addr'.  'pc'/'puc'
   describe the faulting write for precise self-modifying-code handling. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the list pointer select the TB's page slot */
        n = (intptr_t)tb & 3;
        tb = (TranslationBlock *)((intptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
1315
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* prepend the TB to the page's list; the slot index n is encoded
       in the low bits of the list pointer */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((intptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1369
1370/* Allocate a new translation block. Flush the translation buffer if
1371 too many translation blocks or too much generated code. */
1372TranslationBlock *tb_alloc(target_ulong pc)
1373{
1374 TranslationBlock *tb;
1375
1376 if (nb_tbs >= code_gen_max_blocks ||
1377 (code_gen_ptr - code_gen_buffer) >= VBOX_ONLY((uintptr_t))code_gen_buffer_max_size)
1378 return NULL;
1379 tb = &tbs[nb_tbs++];
1380 tb->pc = pc;
1381 tb->cflags = 0;
1382 return tb;
1383}
1384
1385void tb_free(TranslationBlock *tb)
1386{
1387 /* In practice this is mostly used for single use temporary TB
1388 Ignore the hard cases and just back up if this TB happens to
1389 be the last one generated. */
1390 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1391 code_gen_ptr = tb->tc_ptr;
1392 nb_tbs--;
1393 }
1394}
1395
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* bit 2 set in jmp_first marks the head of the circular jump list */
    tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1435
1436/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1437 tb[1].tc_ptr. Return NULL if not found */
1438TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1439{
1440 int m_min, m_max, m;
1441 uintptr_t v;
1442 TranslationBlock *tb;
1443
1444 if (nb_tbs <= 0)
1445 return NULL;
1446 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1447 tc_ptr >= (uintptr_t)code_gen_ptr)
1448 return NULL;
1449 /* binary search (cf Knuth) */
1450 m_min = 0;
1451 m_max = nb_tbs - 1;
1452 while (m_min <= m_max) {
1453 m = (m_min + m_max) >> 1;
1454 tb = &tbs[m];
1455 v = (uintptr_t)tb->tc_ptr;
1456 if (v == tc_ptr)
1457 return tb;
1458 else if (tc_ptr < v) {
1459 m_max = m - 1;
1460 } else {
1461 m_min = m + 1;
1462 }
1463 }
1464 return &tbs[m_max];
1465}
1466
1467static void tb_reset_jump_recursive(TranslationBlock *tb);
1468
/* Unchain direction 'n' of 'tb': find the TB it currently jumps to,
   remove 'tb' from that TB's circular jmp_first list, patch the
   generated code back to its default exit, and recursively unchain the
   target TB as well. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list (low bits == 2 marks the list head) */
        for(;;) {
            n1 = (intptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((intptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (intptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((intptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1507
1508static void tb_reset_jump_recursive(TranslationBlock *tb)
1509{
1510 tb_reset_jump_recursive2(tb, 0);
1511 tb_reset_jump_recursive2(tb, 1);
1512}
1513
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
/* Drop any translated code containing 'pc' so the breakpoint takes
   effect at the next translation (user mode: the virtual address
   doubles as the page address). */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
/* System mode: translate 'pc' to a ram address via the debug page
   walk, then invalidate the translated code covering it. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
1540
1541#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: watchpoints are not supported, so removing is a no-op. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}

/* User-mode emulation: watchpoint insertion is unsupported. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1552#else
1553/* Add a watchpoint. */
/* Add a watchpoint.  Only lengths of 1/2/4/8 bytes, aligned to the
   length, are accepted.  GDB-injected watchpoints are kept at the head
   of the list.  On success the new watchpoint is optionally returned
   via 'watchpoint'. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
#ifndef VBOX
        return -EINVAL;
#else
        return VERR_INVALID_PARAMETER;
#endif
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* flush the page so future accesses go through the slow path and
       re-check the watchpoint list */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
1588
/* Remove a specific watchpoint.  Matches on address, length and flags,
   ignoring the transient BP_WATCHPOINT_HIT bit. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
#ifndef VBOX
    return -ENOENT;
#else
    return VERR_NOT_FOUND;
#endif
}
1609
/* Remove a specific watchpoint by reference: unlink it, flush the TLB
   page so the slow path stops firing, then free the storage (the
   reference is dangling afterwards). */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
1619
1620/* Remove all matching watchpoints. */
1621void cpu_watchpoint_remove_all(CPUState *env, int mask)
1622{
1623 CPUWatchpoint *wp, *next;
1624
1625 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1626 if (wp->flags & mask)
1627 cpu_watchpoint_remove_by_ref(env, wp);
1628 }
1629}
1630#endif
1631
/* Add a breakpoint.  GDB-injected breakpoints are kept at the head of
   the list; translated code at 'pc' is invalidated so the breakpoint
   takes effect at the next translation.  Returns -ENOSYS when the
   target has no breakpoint support (no TARGET_HAS_ICE). */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
1659
/* Remove a specific breakpoint.  Matches on pc and exact flags. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
# ifndef VBOX
    return -ENOENT;
# else
    return VERR_NOT_FOUND;
# endif
#else
    return -ENOSYS;
#endif
}
1681
/* Remove a specific breakpoint by reference: unlink it, invalidate the
   translated code at its pc, then free the storage (the reference is
   dangling afterwards). */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1693
1694/* Remove all matching breakpoints. */
1695void cpu_breakpoint_remove_all(CPUState *env, int mask)
1696{
1697#if defined(TARGET_HAS_ICE)
1698 CPUBreakpoint *bp, *next;
1699
1700 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1701 if (bp->flags & mask)
1702 cpu_breakpoint_remove_by_ref(env, bp);
1703 }
1704#endif
1705}
1706
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
1724
1725#ifndef VBOX
1726
1727/* enable or disable low levels log */
/* enable or disable low levels log; opens the log file lazily on first
   use and closes it when all log flags are cleared */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* any later reopen appends, preserving earlier output */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1754
/* Set the log file name and reopen the log at the current level.
   NOTE(review): the previous 'logfilename' is intentionally not freed —
   it may point to a static default string — so repeated calls leak the
   strdup'd copy; strdup failure is also unchecked.  Confirm before
   changing. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
1764
1765#endif /* !VBOX */
1766
/* Break the jump chains of the TB the CPU is currently executing so it
   drops back to the main loop as soon as possible. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
       problem and hope the cpu will stop of its own accord. For userspace
       emulation this often isn't actually as bad as it sounds. Often
       signals are used primarily to interrupt blocking syscalls. */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1786
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
#ifndef VBOX
    env->interrupt_request |= mask;
#else /* VBOX */
    VM_ASSERT_EMT(env->pVM);
    /* atomic OR: other threads may clear bits concurrently */
    ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
#endif /* VBOX */

#ifndef VBOX
#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif
#endif /* !VBOX */

    if (use_icount) {
        /* force the instruction counter to expire so the loop exits */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1825
/* Clear the given bits from the CPU's pending interrupt mask. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
#ifdef VBOX
    /*
     * Note: the current implementation can be executed by another thread without problems; make sure this remains true
     * for future changes!
     */
    ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
#else /* !VBOX */
    env->interrupt_request &= ~mask;
#endif /* !VBOX */
}
1838
/* Request that the CPU leave its execution loop as soon as possible:
   the flag is raised first, then the current TB chain is broken so the
   main loop regains control and can observe it. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1844
1845#ifndef VBOX
/* Table of selectable log categories (mask, name, help text) used by
   cpu_str_to_log_mask(); terminated by a zero-mask sentinel entry. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1877
1878#ifndef CONFIG_USER_ONLY
/* Global list of registered physical-memory change listeners. */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1881
/* Notify every registered physical-memory client of a mapping change. */
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}
1891
/* Ask every registered client to sync its dirty bitmap for
   [start, end]; stops at and returns the first negative result. */
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}
1903
/* Tell every registered client to enable/disable migration logging;
   stops at and returns the first negative result. */
static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}
1914
/* Recursively walk one node of the physical page table, reporting every
   assigned page to 'client'.  Level 0 nodes hold leaf PhysPageDesc
   arrays; higher levels hold tables of child pointers. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1938
1939static void phys_page_for_each(CPUPhysMemoryClient *client)
1940{
1941 int i;
1942 for (i = 0; i < P_L1_SIZE; ++i) {
1943 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1944 l1_phys_map + 1);
1945 }
1946}
1947
/* Register a new physical-memory client and replay the current memory
   map to it so it starts with a complete view. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1953
/* Unregister a physical-memory client; no further notifications are sent. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1958#endif
1959
/* Return non-zero iff the n-character token s1[0..n) equals the
   NUL-terminated string s2 exactly (same length, same bytes). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1966
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        /* [p, p1) is the current comma-delimited token */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            /* "all" selects every category */
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            /* unknown token: signal failure */
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
1999
/* Report a fatal error to stderr (and the qemu log, if enabled)
   together with a CPU state dump, then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* ap is consumed by the stderr vfprintf; ap2 is for the log copy */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* restore the default SIGABRT handler so abort() really kills
           the process even if the guest installed its own handler */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
2039
2040CPUState *cpu_copy(CPUState *env)
2041{
2042 CPUState *new_env = cpu_init(env->cpu_model_str);
2043 CPUState *next_cpu = new_env->next_cpu;
2044 int cpu_index = new_env->cpu_index;
2045#if defined(TARGET_HAS_ICE)
2046 CPUBreakpoint *bp;
2047 CPUWatchpoint *wp;
2048#endif
2049
2050 memcpy(new_env, env, sizeof(CPUState));
2051
2052 /* Preserve chaining and index. */
2053 new_env->next_cpu = next_cpu;
2054 new_env->cpu_index = cpu_index;
2055
2056 /* Clone all break/watchpoints.
2057 Note: Once we support ptrace with hw-debug register access, make sure
2058 BP_CPU break/watchpoints are handled correctly on clone. */
2059 QTAILQ_INIT(&env->breakpoints);
2060 QTAILQ_INIT(&env->watchpoints);
2061#if defined(TARGET_HAS_ICE)
2062 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
2063 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
2064 }
2065 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2066 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
2067 wp->flags, NULL);
2068 }
2069#endif
2070
2071 return new_env;
2072}
2073
2074#endif /* !VBOX */
2075#if !defined(CONFIG_USER_ONLY)
2076
/* Flush the tb_jmp_cache entries that could reference TBs overlapping
   the given virtual page. */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
#ifdef VBOX

    /* inform raw mode about TLB page flush */
    remR3FlushPage(env, addr);
#endif /* VBOX */
}
2096
/* Canonical "empty" TLB entry used to invalidate slots: all fields -1
   so no address comparison ever matches. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read = -1,
    .addr_write = -1,
    .addr_code = -1,
    .addend = -1,
};
2103
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#ifdef VBOX
    Assert(EMRemIsLockOwner(env->pVM));
    /* the pending external flush request is being satisfied now */
    ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_FLUSH_TLB);
#endif

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* empty every slot of every MMU mode */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    /* forget any recorded large-page flush range */
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
#ifdef VBOX

    /* inform raw mode about TLB flush */
    remR3FlushTLB(env, flush_global);
#endif /* VBOX */
}
2140
2141static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
2142{
2143 if (addr == (tlb_entry->addr_read &
2144 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2145 addr == (tlb_entry->addr_write &
2146 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2147 addr == (tlb_entry->addr_code &
2148 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
2149 *tlb_entry = s_cputlb_empty_entry;
2150 }
2151}
2152
/* Flush all TLB entries and jump-cache entries for one virtual page;
   falls back to a full flush when the page lies inside a recorded
   large-page mapping. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

    Assert(EMRemIsLockOwner(env->pVM));
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* one slot per page per MMU mode */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
2183
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* clearing CODE_DIRTY_FLAG forces writes to this page through the
       not-dirty slow path where SMC detection happens */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
    /** @todo Retest this? This function has changed... */
    remR3ProtectCode(cpu_single_env, ram_addr);
#endif /* VBOX */
}
2196
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code ('env' and 'vaddr' are unused here) */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2204
/* If this TLB entry writes to RAM inside [start, start+length), tag it
   TLB_NOTDIRTY so the next write goes through the slow path and sets
   the dirty bit again. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr;
#ifdef VBOX

    /* NOTE(review): the low bits of 'start' appear to act as a skip
       marker in the VBOX build — confirm against remR3TlbGCPhys2Ptr */
    if (start & 3)
        return;
#endif /* VBOX */
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
2221
/* Clear the given dirty flags for [start, end) and re-arm dirty
   tracking by tagging matching TLB write entries in every CPU.
   Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    uintptr_t length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    start1 = start;
#elif !defined(VBOX)
    start1 = (uintptr_t)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below. */
    if ((uintptr_t)qemu_get_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }
#else
    start1 = (uintptr_t)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
#endif

    /* walk every CPU's TLB tables and re-tag matching write entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2263
2264#ifndef VBOX
2265
2266int cpu_physical_memory_set_dirty_tracking(int enable)
2267{
2268 int ret = 0;
2269 in_migration = enable;
2270 ret = cpu_notify_migration_log(!!enable);
2271 return ret;
2272}
2273
/* Return non-zero when dirty-memory tracking (migration) is enabled. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2278
2279#endif /* !VBOX */
2280
/* Ask the memory clients to synchronise their dirty bitmap for the given
   physical range.  The VBox build has no such clients, so it is a no-op. */
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
#ifndef VBOX
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
#else /* VBOX */
    return 0;
#endif /* VBOX */
}
2293
/* Re-check one TLB entry against the physical dirty bitmap: if the RAM
   page it maps for writing is no longer dirty, tag the entry with
   TLB_NOTDIRTY so stores to it go through the slow path again.
   The VBox (non-REM_PHYS_ADDR_IN_TLB) variant takes the pre-resolved
   physical addend for the slot instead of deriving it from a host
   pointer. */
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
#else
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
#endif
{
    ram_addr_t ram_addr;
#ifndef VBOX
    void *p;
#endif

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* Recover the ram address; the addend is either a guest physical
           address, a host pointer, or the supplied per-slot addend. */
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
#elif !defined(VBOX)
        p = (void *)(uintptr_t)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
#else
        Assert(phys_addend != -1);
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
#endif
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
2321
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    /* Walk every TLB slot of every MMU mode; the VBox build passes the
       cached physical addend for the slot along with the entry. */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i], env->phys_addends[mmu_idx][i]);
#else
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
#endif
    }
}
2336
2337static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2338{
2339 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2340 tlb_entry->addr_write = vaddr;
2341}
2342
2343/* update the TLB corresponding to virtual page vaddr
2344 so that it is no longer dirty */
2345static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2346{
2347 int i;
2348 int mmu_idx;
2349
2350 vaddr &= TARGET_PAGE_MASK;
2351 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2352 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2353 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2354}
2355
2356/* Our TLB does not support large pages, so remember the area covered by
2357 large pages and trigger a full TLB flush if these are invalidated. */
2358static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2359 target_ulong size)
2360{
2361 target_ulong mask = ~(size - 1);
2362
2363 if (env->tlb_flush_addr == (target_ulong)-1) {
2364 env->tlb_flush_addr = vaddr & mask;
2365 env->tlb_flush_mask = mask;
2366 return;
2367 }
2368 /* Extend the existing region to include the new page.
2369 This is a compromise between unnecessary flushes and the cost
2370 of maintaining a full variable size TLB. */
2371 mask &= env->tlb_flush_mask;
2372 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2373 mask <<= 1;
2374 }
2375 env->tlb_flush_addr &= mask;
2376 env->tlb_flush_mask = mask;
2377}
2378
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page. */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    ram_addr_t pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    int read_mods = 0, write_mods = 0, code_mods = 0;
#endif

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    /* Look up the physical page descriptor; unmapped pages fall back to
       the unassigned-memory handlers. */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d size=" TARGET_FMT_lx " pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, size, (long)pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    /* Resolve the host-side addend for direct RAM accesses. */
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    addend = pd & TARGET_PAGE_MASK;
#elif !defined(VBOX)
    addend = (uintptr_t)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
#else
    /** @todo this is racing the phys_page_find call above since it may register
     * a new chunk of memory... */
    addend = (uintptr_t)remR3TlbGCPhys2Ptr(env, pd & TARGET_PAGE_MASK, !!(prot & PAGE_WRITE));
#endif

    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)

    /* VBox access handlers: the low two bits of the addend returned by
       remR3TlbGCPhys2Ptr encode how accesses must be intercepted
       (presumably set by the PGM access-handler code -- see callee). */
    if (addend & 0x3)
    {
        if (addend & 0x2)
        {
            /* catch write */
            if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
                write_mods |= TLB_MMIO;
        }
        else if (addend & 0x1)
        {
            /* catch all */
            if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            {
                read_mods |= TLB_MMIO;
                write_mods |= TLB_MMIO;
                code_mods |= TLB_MMIO;
            }
        }
        if ((iotlb & ~TARGET_PAGE_MASK) == 0)
            iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
        addend &= ~(target_ulong)0x3;
    }

#endif
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    /* Fill in the TLB slot for this page. */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }

#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    /* Apply the access-handler interception bits collected above. */
    if (prot & PAGE_READ)
        te->addr_read |= read_mods;
    if (prot & PAGE_EXEC)
        te->addr_code |= code_mods;
    if (prot & PAGE_WRITE)
        te->addr_write |= write_mods;

    env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK)- vaddr;
#endif

#ifdef VBOX
    /* inform raw mode about TLB page change */
    remR3FlushPage(env, vaddr);
#endif
}
2537
2538#else
2539
/* CONFIG_USER_ONLY build: there is no softmmu TLB, so flushing is a no-op. */
void tlb_flush(CPUState *env, int flush_global)
{
}
2543
/* CONFIG_USER_ONLY build: no softmmu TLB, nothing to flush per page. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2547
2548/*
2549 * Walks guest process memory "regions" one by one
2550 * and calls callback function 'fn' for each region.
2551 */
2552
/* State threaded through walk_memory_regions_1(): accumulates runs of
   pages with identical protection into maximal regions. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;  /* callback invoked once per region */
    void *priv;                 /* opaque argument passed to 'fn' */
    uintptr_t start;            /* start of current region, -1ul if none open */
    int prot;                   /* protection flags of the current region */
};
2560
2561static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2562 abi_ulong end, int new_prot)
2563{
2564 if (data->start != -1ul) {
2565 int rc = data->fn(data->priv, data->start, end, data->prot);
2566 if (rc != 0) {
2567 return rc;
2568 }
2569 }
2570
2571 data->start = (new_prot ? end : -1ul);
2572 data->prot = new_prot;
2573
2574 return 0;
2575}
2576
/* Recursive helper for walk_memory_regions(): walks one node of the
   page-table radix tree at 'level', covering guest addresses from
   'base', and emits a region boundary whenever the protection flags
   change.  Returns the first non-zero callback result, else 0. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Hole in the address map: close any open region. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        /* Leaf level: an array of per-page descriptors. */
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        /* Interior level: an array of child-node pointers. */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2614
/* Walk all mapped guest memory, calling 'fn(priv, start, end, prot)' for
   each maximal run of pages with identical protection.  Stops early and
   returns the first non-zero value returned by 'fn'; otherwise 0. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    target_ulong i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;   /* no region open yet */
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final open region, if any. */
    return walk_memory_regions_end(&data, 0, 0);
}
2635
2636static int dump_region(void *priv, abi_ulong start,
2637 abi_ulong end, unsigned long prot)
2638{
2639 FILE *f = (FILE *)priv;
2640
2641 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2642 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2643 start, end, end - start,
2644 ((prot & PAGE_READ) ? 'r' : '-'),
2645 ((prot & PAGE_WRITE) ? 'w' : '-'),
2646 ((prot & PAGE_EXEC) ? 'x' : '-'));
2647
2648 return (0);
2649}
2650
/* dump memory mappings */
/* Write a table of all guest memory mappings to 'f' via dump_region(). */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
2658
2659int page_get_flags(target_ulong address)
2660{
2661 PageDesc *p;
2662
2663 p = page_find(address >> TARGET_PAGE_BITS);
2664 if (!p)
2665 return 0;
2666 return p->flags;
2667}
2668
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

#ifdef VBOX
    /* This is user-mode-emulation-only code; the VBox build should never
       reach it. */
    AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
#endif
    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2709
2710int page_check_range(target_ulong start, target_ulong len, int flags)
2711{
2712 PageDesc *p;
2713 target_ulong end;
2714 target_ulong addr;
2715
2716 /* This function should never be called with addresses outside the
2717 guest address space. If this assert fires, it probably indicates
2718 a missing call to h2g_valid. */
2719#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2720 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2721#endif
2722
2723 if (len == 0) {
2724 return 0;
2725 }
2726 if (start + len - 1 < start) {
2727 /* We've wrapped around. */
2728 return -1;
2729 }
2730
2731 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2732 start = start & TARGET_PAGE_MASK;
2733
2734 for (addr = start, len = end - start;
2735 len != 0;
2736 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2737 p = page_find(addr >> TARGET_PAGE_BITS);
2738 if( !p )
2739 return -1;
2740 if( !(p->flags & PAGE_VALID) )
2741 return -1;
2742
2743 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2744 return -1;
2745 if (flags & PAGE_WRITE) {
2746 if (!(p->flags & PAGE_WRITE_ORG))
2747 return -1;
2748 /* unprotect the page if it was put read-only because it
2749 contains translated code */
2750 if (!(p->flags & PAGE_WRITE)) {
2751 if (!page_unprotect(addr, 0, NULL))
2752 return -1;
2753 }
2754 return 0;
2755 }
2756 }
2757 return 0;
2758}
2759
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* A host page may span several target pages; restore write access
           to the whole host page and invalidate the code on each. */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2807
/* CONFIG_USER_ONLY stub: there is no softmmu TLB to mark dirty. */
static inline void tlb_set_dirty(CPUState *env,
                                 uintptr_t addr, target_ulong vaddr)
{
}
2812#endif /* defined(CONFIG_USER_ONLY) */
2813
2814#if !defined(CONFIG_USER_ONLY)
2815
2816#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* Descriptor for a page split into sub-regions with different backings:
   one I/O handler index and one region offset per byte of the page. */
typedef struct subpage_t {
    target_phys_addr_t base;                    /* base address of the page */
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];  /* I/O handler index per byte */
    ram_addr_t region_offset[TARGET_PAGE_SIZE]; /* region offset per byte */
} subpage_t;
2822
2823static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2824 ram_addr_t memory, ram_addr_t region_offset);
2825static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2826 ram_addr_t orig_memory,
2827 ram_addr_t region_offset);
/* Determine whether the page at 'addr' is only partially covered by the
   registration range [start_addr, start_addr + orig_size): computes the
   intra-page start/end offsets (start_addr2/end_addr2) and sets
   need_subpage when the coverage is not the full page. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2847
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

#ifndef VBOX
    cpu_notify_set_memory(start_addr, size, phys_offset);
#endif /* !VBOX */

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already registered: replace it, possibly splitting it
               into a subpage when the new range covers it only partially. */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* For RAM/ROM(D) pages the offset advances page by page. */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Fresh page: allocate a descriptor for it. */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
#ifndef VBOX
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
#else
    /* We have one thread per CPU, so, one of the other EMTs might be executing
       code right now and flushing the TLB may crash it. */
    env = first_cpu;
    if (EMRemIsLockOwner(env->pVM))
        tlb_flush(env, 1);
    else
        ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_FLUSH_TLB);
#endif
}
2949
2950/* XXX: temporary until new memory mapping API */
2951ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2952{
2953 PhysPageDesc *p;
2954
2955 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2956 if (!p)
2957 return IO_MEM_UNASSIGNED;
2958 return p->phys_offset;
2959}
2960
2961#ifndef VBOX
2962
/* Mark an MMIO range as coalescable with KVM (no-op without KVM). */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}
2968
/* Remove a previously registered coalesced MMIO range from KVM. */
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}
2974
/* Flush any MMIO writes KVM has buffered for coalesced regions. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
2980
2981#if defined(__linux__) && !defined(TARGET_S390X)
2982
2983#include <sys/vfs.h>
2984
2985#define HUGETLBFS_MAGIC 0x958458f6
2986
2987static size_t gethugepagesize(const char *path)
2988{
2989 struct statfs fs;
2990 int ret;
2991
2992 do {
2993 ret = statfs(path, &fs);
2994 } while (ret != 0 && errno == EINTR);
2995
2996 if (ret != 0) {
2997 perror(path);
2998 return 0;
2999 }
3000
3001 if (fs.f_type != HUGETLBFS_MAGIC)
3002 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
3003
3004 return (size_t)fs.f_bsize;
3005}
3006
/* Allocate 'memory' bytes of guest RAM backed by a (deleted) temp file on
   the hugetlbfs mount 'path', storing the file descriptor in 'block'.
   Returns the mmap'ed area, or NULL on any failure (caller falls back to
   normal allocation). */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    size_t hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Requests smaller than one huge page are not worth backing here. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the mapping keeps the file alive until close. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
3075#endif
3076
3077static ram_addr_t find_ram_offset(ram_addr_t size)
3078{
3079 RAMBlock *block, *next_block;
3080 ram_addr_t offset = 0, mingap = ULONG_MAX;
3081
3082 if (QLIST_EMPTY(&ram_list.blocks))
3083 return 0;
3084
3085 QLIST_FOREACH(block, &ram_list.blocks, next) {
3086 ram_addr_t end, next = ULONG_MAX;
3087
3088 end = block->offset + block->length;
3089
3090 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
3091 if (next_block->offset >= end) {
3092 next = MIN(next, next_block->offset);
3093 }
3094 }
3095 if (next - end >= size && next - end < mingap) {
3096 offset = end;
3097 mingap = next - end;
3098 }
3099 }
3100 return offset;
3101}
3102
3103static ram_addr_t last_ram_offset(void)
3104{
3105 RAMBlock *block;
3106 ram_addr_t last = 0;
3107
3108 QLIST_FOREACH(block, &ram_list.blocks, next)
3109 last = MAX(last, block->offset + block->length);
3110
3111 return last;
3112}
3113
/* Register a RAM block backed by caller-provided host memory 'host'.
   Builds a unique "<dev-path>/<name>" id (aborting on duplicates),
   assigns an offset in ram_addr_t space, marks the whole block dirty,
   and returns its offset.  Caller retains ownership of 'host'. */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    /* Prefix the id with the owning device's bus path, if available. */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->host = host;

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the global dirty bitmap and mark the new block fully dirty. */
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
3156
/* Allocate and register a new RAM block of 'size' bytes, choosing the
   backing store (hugetlbfs file via -mem-path, special S390 mmap, or
   plain qemu_vmalloc).  Returns the block's ram_addr_t offset.
   NOTE: the id-construction and registration logic duplicates
   qemu_ram_alloc_from_ptr() above. */
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    /* Prefix the id with the owning device's bus path, if available. */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        /* Try hugetlbfs first, fall back to normal allocation. */
        new_block->host = file_ram_alloc(new_block, size, mem_path);
        if (!new_block->host) {
            new_block->host = qemu_vmalloc(size);
#ifdef MADV_MERGEABLE
            madvise(new_block->host, size, MADV_MERGEABLE);
#endif
        }
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                               PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the global dirty bitmap and mark the new block fully dirty. */
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
3222
/* Release the RAM block that starts exactly at 'addr', undoing whichever
   backing store qemu_ram_alloc() chose for it.  Silently does nothing if
   no block starts at 'addr'. */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                /* NOTE(review): fd == 0 is treated as "not file-backed"
                   here -- relies on fd 0 never being a backing fd. */
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                qemu_vfree(block->host);
#endif
            }
            qemu_free(block);
            return;
        }
    }

}
3252
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move the block to the list head so repeated lookups of the
               same block hit it first (simple MRU optimisation). */
            QLIST_REMOVE(block, next);
            QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            return block->host + (addr - block->offset);
        }
    }

    /* No block covers 'addr': fatal. */
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
3278
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* NOTE(review): for 'host' below block->host the pointer
           difference is negative; this presumably relies on the unsigned
           comparison against block->length rejecting it -- confirm. */
        if (host - block->host < block->length) {
            return block->offset + (host - block->host);
        }
    }

    /* Pointer does not belong to any RAM block: fatal. */
    fprintf(stderr, "Bad ram pointer %p\n", ptr);
    abort();

    return 0;
}
3297
3298#endif /* !VBOX */
3299
/* Byte read from unassigned memory: returns 0 (SPARC/MicroBlaze raise a
   target-specific unassigned-access fault instead). */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
3310
/* 16-bit read from unassigned memory: returns 0 (SPARC/MicroBlaze raise
   a target-specific unassigned-access fault instead). */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
3321
/* 32-bit read from unassigned memory: returns 0 (SPARC/MicroBlaze raise
   a target-specific unassigned-access fault instead). */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
3332
/* Byte write to unassigned memory: silently discarded (SPARC/MicroBlaze
   raise a target-specific unassigned-access fault instead). */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
3342
/* 16-bit write to unassigned memory: silently discarded (SPARC/MicroBlaze
   raise a target-specific unassigned-access fault instead). */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
3352
/* 32-bit write to unassigned memory: silently discarded (SPARC/MicroBlaze
   raise a target-specific unassigned-access fault instead). */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
3362
/* Dispatch tables for unassigned memory, indexed by access-size shift
   (0 = byte, 1 = word, 2 = dword); used as the fallback handlers in
   cpu_register_io_memory_fixed(). */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
3374
/* Byte-write handler for RAM pages routed through IO_MEM_NOTDIRTY,
   i.e. pages that may still back translated code: invalidate the
   affected TBs before performing the store, then mark the page dirty
   so the fast (direct RAM) path can be restored. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Translated code may be derived from this address: flush the
           TBs covering the written byte, then re-read the flags the
           invalidation may have updated. */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    remR3PhysWriteU8(ram_addr, val);
#else
    stb_p(qemu_get_ram_ptr(ram_addr), val);
#endif
    /* Set every dirty bit except CODE_DIRTY_FLAG. */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3398
/* 16-bit counterpart of notdirty_mem_writeb(): invalidate TBs over the
   two written bytes, store, then mark the page dirty. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Flush TBs covering the written range, then reload the flags
           the invalidation may have updated. */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    remR3PhysWriteU16(ram_addr, val);
#else
    stw_p(qemu_get_ram_ptr(ram_addr), val);
#endif
    /* Set every dirty bit except CODE_DIRTY_FLAG. */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3422
/* 32-bit counterpart of notdirty_mem_writeb(): invalidate TBs over the
   four written bytes, store, then mark the page dirty. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Flush TBs covering the written range, then reload the flags
           the invalidation may have updated. */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    remR3PhysWriteU32(ram_addr, val);
#else
    stl_p(qemu_get_ram_ptr(ram_addr), val);
#endif
    /* Set every dirty bit except CODE_DIRTY_FLAG. */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3446
/* Placeholder read table for io slots that are only ever written
   (IO_MEM_ROM, IO_MEM_NOTDIRTY); the NULL entries must never be
   dispatched. */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

/* Write table for IO_MEM_NOTDIRTY pages, by access-size shift. */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3458
/* Generate a debug exception if a watchpoint has been hit.
   @param offset    page offset of the access (added to the page base of
                    env->mem_io_vaddr to recover the faulting vaddr)
   @param len_mask  alignment mask for the access size (~0x0/~0x1/~0x3)
   @param flags     BP_MEM_READ or BP_MEM_WRITE, matched against wp->flags */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Hit test in both directions: either the access covers the
           watchpoint start, or the watchpoint range covers the access. */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Resynchronize the CPU state to the faulting insn and
                   drop the current TB. */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Regenerate a single-insn TB so the access completes
                       before the debug exception (see re-entry path above). */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                /* Does not return: restarts execution from the new state. */
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3503
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  The len_mask argument (~0x0/~0x1/~0x3) encodes the
   access size for the hit test in check_watchpoint(). */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
3545
/* Dispatch tables for watchpoint-trapped pages, by access-size shift. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
3557
/* Read from a sub-page region: look up the io handler registered for
   the sub-page slot containing addr, apply its region offset, and
   forward the access (len is the size shift: 0/1/2). */
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}
3572
/* Write to a sub-page region: mirror of subpage_readlen() for stores. */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}
3586
/* Fixed-size wrappers binding the io table callback signature to
   subpage_readlen()/subpage_writelen(); last argument is the size
   shift (0 = byte, 1 = word, 2 = dword). */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}
3619
/* Dispatch tables registered for sub-page io slots, by size shift. */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3631
3632static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3633 ram_addr_t memory, ram_addr_t region_offset)
3634{
3635 int idx, eidx;
3636
3637 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3638 return -1;
3639 idx = SUBPAGE_IDX(start);
3640 eidx = SUBPAGE_IDX(end);
3641#if defined(DEBUG_SUBPAGE)
3642 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3643 mmio, start, end, idx, eidx, memory);
3644#endif
3645 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3646 for (; idx <= eidx; idx++) {
3647 mmio->sub_io_index[idx] = memory;
3648 mmio->region_offset[idx] = region_offset;
3649 }
3650
3651 return 0;
3652}
3653
/* Allocate and register a sub-page io region for the page at `base`:
   a new io slot is allocated for the subpage dispatcher, *phys is set
   to its phys_offset value (tagged IO_MEM_SUBPAGE), and every slot is
   initially bound to the original handler/offset. */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    /* Start with the whole page mapped to the original handler. */
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3674
3675static int get_free_io_mem_idx(void)
3676{
3677 int i;
3678
3679 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3680 if (!io_mem_used[i]) {
3681 io_mem_used[i] = 1;
3682 return i;
3683 }
3684 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3685 return -1;
3686}
3687
3688/* mem_read and mem_write are arrays of functions containing the
3689 function to access byte (index 0), word (index 1) and dword (index
3690 2). Functions can be omitted with a NULL function pointer.
3691 If io_index is non zero, the corresponding io zone is
3692 modified. If it is zero, a new io zone is allocated. The return
3693 value can be used with cpu_register_physical_memory(). (-1) is
3694 returned if error. */
3695static int cpu_register_io_memory_fixed(int io_index,
3696 CPUReadMemoryFunc * const *mem_read,
3697 CPUWriteMemoryFunc * const *mem_write,
3698 void *opaque)
3699{
3700 int i;
3701
3702 if (io_index <= 0) {
3703 io_index = get_free_io_mem_idx();
3704 if (io_index == -1)
3705 return io_index;
3706 } else {
3707 io_index >>= IO_MEM_SHIFT;
3708 if (io_index >= IO_MEM_NB_ENTRIES)
3709 return -1;
3710 }
3711
3712 for (i = 0; i < 3; ++i) {
3713 io_mem_read[io_index][i]
3714 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3715 }
3716 for (i = 0; i < 3; ++i) {
3717 io_mem_write[io_index][i]
3718 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3719 }
3720 io_mem_opaque[io_index] = opaque;
3721
3722 return (io_index << IO_MEM_SHIFT);
3723}
3724
/* Public wrapper: allocate a new io zone (io_index 0 means "allocate")
   and install the given handlers; see cpu_register_io_memory_fixed(). */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
3731
3732void cpu_unregister_io_memory(int io_table_address)
3733{
3734 int i;
3735 int io_index = io_table_address >> IO_MEM_SHIFT;
3736
3737 for (i=0;i < 3; i++) {
3738 io_mem_read[io_index][i] = unassigned_mem_read[i];
3739 io_mem_write[io_index][i] = unassigned_mem_write[i];
3740 }
3741 io_mem_opaque[io_index] = NULL;
3742 io_mem_used[io_index] = 0;
3743}
3744
/* Install the fixed io handlers (ROM, unassigned, not-dirty) and the
   watchpoint handler.  ROM and not-dirty slots use error_mem_read as
   their read table: those entries are NULL and must never be called. */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    /* Reserve the low table entries for the fixed handlers.
       NOTE(review): assumes all fixed IO_MEM_* indices fall below 5 --
       confirm against the IO_MEM_* definitions. */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}
3758
3759#endif /* !defined(CONFIG_USER_ONLY) */
3760
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
/* Copy `len` bytes between guest virtual memory and `buf`, splitting
   at page boundaries and validating PAGE_VALID plus the matching
   PAGE_READ/PAGE_WRITE permission for each page.  Returns 0 on
   success, -1 on any invalid/inaccessible page. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
3801
3802#else
/* Copy `len` bytes between guest physical memory and `buf`, splitting
   the range at page boundaries.  MMIO pages are accessed through the
   registered io handlers using the widest aligned access (4/2/1
   bytes); plain RAM is copied directly (via the VBox REM helpers in
   the VBOX build).  RAM writes invalidate any translated code in the
   written range and update the dirty bitmap. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* MMIO write: dispatch through the io handler tables. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    val = ldl_p(buf);
#else
                    val = *(const uint32_t *)buf;
#endif
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    val = lduw_p(buf);
#else
                    val = *(const uint16_t *)buf;
#endif
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    val = ldub_p(buf);
#else
                    val = *(const uint8_t *)buf;
#endif
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
#ifdef VBOX
                remR3PhysWrite(addr1, buf, l); NOREF(ptr);
#else
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
#endif
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    stl_p(buf, val);
#else
                    *(uint32_t *)buf = val;
#endif
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    stw_p(buf, val);
#else
                    *(uint16_t *)buf = val;
#endif
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    stb_p(buf, val);
#else
                    *(uint8_t *)buf = val;
#endif
                    l = 1;
                }
            } else {
                /* RAM case */
#ifdef VBOX
                remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
#else
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
#endif
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3931
3932#ifndef VBOX
3933
/* used for ROM loading : can write in RAM and ROM */
/* Like the write half of cpu_physical_memory_rw() but also stores into
   ROM/ROMD pages; MMIO pages are silently skipped and no dirty
   tracking or TB invalidation is performed. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            ram_addr_t addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3972
/* Bounce buffer used by cpu_physical_memory_map() when the requested
   range is not directly addressable RAM; buffer != NULL means the
   single bounce buffer is in use, blocking further bounce mappings. */
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

/* A client waiting to retry a failed map; its callback is invoked from
   cpu_notify_map_clients() once the bounce buffer is released. */
typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3989
/* Register a callback to be invoked when map resources become
   available again (see cpu_notify_map_clients()).  Returns an opaque
   handle for cpu_unregister_map_client(). */
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}
3999
/* Remove and free a client registered via cpu_register_map_client();
   _client is the handle that function returned. */
void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}
4007
4008static void cpu_notify_map_clients(void)
4009{
4010 MapClient *client;
4011
4012 while (!QLIST_EMPTY(&map_client_list)) {
4013 client = QLIST_FIRST(&map_client_list);
4014 client->callback(client->opaque);
4015 cpu_unregister_map_client(client);
4016 }
4017}
4018
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;
    ram_addr_t addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not directly mappable: use the single bounce buffer, but
               only for the first page and only if it is free. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Pre-fill the bounce buffer for a read mapping. */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host addresses stopped being contiguous: end the mapping
               here and return the shorter length via *plen. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
4080
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: on write, invalidate translated code and
           set dirty bits page by page over the accessed range. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer mapping: flush written data back, release the
       buffer and wake anyone waiting to map. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
4116
4117#endif /* !VBOX */
4118
/* warning: addr must be aligned */
/* Load a 32-bit value from guest physical memory: MMIO pages dispatch
   through the io handler tables; RAM/ROMD pages are read directly
   (via remR3PhysReadU32 in the VBOX build). */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    ram_addr_t pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
#ifndef VBOX
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
#else
        val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
#endif
    }
    return val;
}
4154
/* warning: addr must be aligned */
/* Load a 64-bit value from guest physical memory.  MMIO pages are read
   as two 32-bit accesses combined in target endian order; RAM/ROMD
   pages are read directly. */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    ram_addr_t pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: split into two 32-bit io reads, high word first on
           big-endian targets. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
#ifndef VBOX
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
#else
        val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
#endif
    }
    return val;
}
4196
/* XXX: optimize */
/* Load one byte from guest physical memory via the generic rw path. */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
4204
/* warning: addr must be aligned */
/* Load a 16-bit value from guest physical memory; MMIO dispatch or
   direct RAM read like ldl_phys(). */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    int io_index;
#ifndef VBOX
    uint8_t *ptr;
#endif
    /* val is declared wider than the 16-bit result; harmless, as every
       value assigned fits in 32 bits and the return type truncates. */
    uint64_t val;
    ram_addr_t pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
#ifndef VBOX
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = lduw_p(ptr);
#else
        val = remR3PhysReadU16((pd & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK));
#endif
    }
    return val;
}
4242
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    ram_addr_t pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* MMIO write through the io handler tables. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        /* Direct RAM store; note addr1 is only in scope in the
           non-VBOX build, matching its use in the in_migration block
           below (also non-VBOX only). */
#ifndef VBOX
        ram_addr_t addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
#else
        remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
#endif

#ifndef VBOX
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
#endif /* !VBOX */
    }
}
4287
/* 64-bit variant of stl_phys_notdirty(): no dirty marking, no code
   invalidation; MMIO writes are split into two 32-bit accesses in
   target endian order. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    ram_addr_t pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* Direct RAM store. */
#ifndef VBOX
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
#else
        remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
#endif
    }
}
4323
/* warning: addr must be aligned */
/* Store a 32-bit value to guest physical memory.  Unlike
   stl_phys_notdirty(), RAM stores invalidate translated code in the
   written range and set the dirty bits. */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    ram_addr_t pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* MMIO write through the io handler tables. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ram_addr_t addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
#ifndef VBOX
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
#else
        remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
#endif
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4363
/* XXX: optimize */
/* Store one byte to guest physical memory via the generic rw path. */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
4370
/* warning: addr must be aligned */
/* Store a 16-bit value to guest physical memory, with code
   invalidation and dirty marking for RAM stores (cf. stl_phys). */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    ram_addr_t pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* MMIO write through the io handler tables. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        ram_addr_t addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
#ifndef VBOX
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
#else
        remR3PhysWriteU16(addr1, val); NOREF(ptr);
#endif
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4410
/* XXX: optimize */
/* Store a 64-bit value: byte-swap to target order, then write through
   the generic rw path. */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
4417
#ifndef VBOX
/* virtual memory access for debug (includes writing to ROM) */
/* Translate each page with cpu_get_phys_page_debug() and forward to
   the physical accessors; writes go through the ROM-capable path.
   Returns 0 on success, -1 if any page is unmapped. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif /* !VBOX */
4448#endif
4449
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
/* Called when an icount-mode TB performed device I/O mid-block:
   rebuild the current TB so it ends exactly on the I/O instruction
   (CF_LAST_IO) and restart execution.  retaddr is the host return
   address inside the offending translated code. */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((uintptr_t)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    /* Resynchronize guest CPU state to the faulting instruction. */
    cpu_restore_state(tb, env, (uintptr_t)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* Re-generate the TB with an instruction budget of n and the
       last-insn-does-IO flag set. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /** @todo If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
4508
4509#if !defined(CONFIG_USER_ONLY)
4510
4511#ifndef VBOX
4512void dump_exec_info(FILE *f,
4513 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4514{
4515 int i, target_code_size, max_target_code_size;
4516 int direct_jmp_count, direct_jmp2_count, cross_page;
4517 TranslationBlock *tb;
4518
4519 target_code_size = 0;
4520 max_target_code_size = 0;
4521 cross_page = 0;
4522 direct_jmp_count = 0;
4523 direct_jmp2_count = 0;
4524 for(i = 0; i < nb_tbs; i++) {
4525 tb = &tbs[i];
4526 target_code_size += tb->size;
4527 if (tb->size > max_target_code_size)
4528 max_target_code_size = tb->size;
4529 if (tb->page_addr[1] != -1)
4530 cross_page++;
4531 if (tb->tb_next_offset[0] != 0xffff) {
4532 direct_jmp_count++;
4533 if (tb->tb_next_offset[1] != 0xffff) {
4534 direct_jmp2_count++;
4535 }
4536 }
4537 }
4538 /* XXX: avoid using doubles ? */
4539 cpu_fprintf(f, "Translation buffer state:\n");
4540 cpu_fprintf(f, "gen code size %ld/%ld\n",
4541 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4542 cpu_fprintf(f, "TB count %d/%d\n",
4543 nb_tbs, code_gen_max_blocks);
4544 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4545 nb_tbs ? target_code_size / nb_tbs : 0,
4546 max_target_code_size);
4547 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4548 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4549 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4550 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4551 cross_page,
4552 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4553 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4554 direct_jmp_count,
4555 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4556 direct_jmp2_count,
4557 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4558 cpu_fprintf(f, "\nStatistics:\n");
4559 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4560 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4561 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4562 tcg_dump_info(f, cpu_fprintf);
4563}
4564#endif /* !VBOX */
4565
/* Instantiate the code-fetch ("_cmmu") variants of the softmmu load
   helpers by including softmmu_template.h once per access size.
   SOFTMMU_CODE_ACCESS selects the instruction-fetch code paths in the
   template, GETPC() is NULL because there is no guest return address to
   restore for code fetches, and 'env' is aliased to the global
   cpu_single_env for the duration of the template expansions. */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Stop aliasing 'env' so later code sees the real symbol again. */
#undef env
4585#endif
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use