VirtualBox

source: vbox/trunk/src/recompiler/softmmu_template.h @ 76553

Last change on this file since 76553 was 69465, checked in by vboxsync, 7 years ago

recompiler: scm updates

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 13.1 KB
/*
 * Software MMU support
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "qemu-timer.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED uint64_t
#endif
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#else
#error unsupported data size
#endif

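/* Sketch of how this template is used (assumed from QEMU convention; the
 * including translation unit is not shown in this file): the header is
 * compiled once per access size by defining SHIFT before each inclusion,
 * roughly:
 *
 *   #define MMUSUFFIX _mmu
 *   #define SHIFT 0
 *   #include "softmmu_template.h"   => emits __ldb_mmu / __stb_mmu
 *   #define SHIFT 1
 *   #include "softmmu_template.h"   => emits __ldw_mmu / __stw_mmu
 *   #define SHIFT 2
 *   #include "softmmu_template.h"   => emits __ldl_mmu / __stl_mmu
 *   #define SHIFT 3
 *   #include "softmmu_template.h"   => emits __ldq_mmu / __stq_mmu
 *
 * The #undef block at the end of this file makes the repeated inclusion
 * possible.
 */
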
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

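/* READ_ACCESS_TYPE follows the access-type convention that tlb_fill() and
 * do_unaligned_access() use throughout this recompiler: 0 = data read,
 * 1 = data write, 2 = instruction fetch.  The store helpers below pass 1
 * explicitly. */
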
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (uintptr_t)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
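    /* io_mem_read[index][n] holds the 1-, 2- and 4-byte handlers (n = 0..2),
     * so an 8-byte MMIO read is split into two 4-byte reads and recombined
     * in guest byte order. */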
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
#ifndef VBOX
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
#else
/* Load helpers are invoked from generated code, and TCG assumes that a
   valid value occupies the whole register, whereas gcc 4.3 and later may
   use only the lower part of the register for smaller types.  So force
   promotion to the full register width. */
DATA_TYPE_PROMOTED REGPARM
glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                    int mmu_idx)
#endif
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    uintptr_t addend;
    void *retaddr;

    /* test if there is a match for an unaligned or IO access */
    /* XXX: could be done in the memory macro in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
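    /* The software TLB is a direct-mapped cache indexed by virtual page
     * number.  A hit requires the page tag to match with the low flag bits
     * clear: TLB_INVALID_MASK never matches, and any other nonzero low bit
     * marks a page that needs special handling (e.g. MMIO). */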
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(uintptr_t)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
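
/* For instance, with SHIFT == 2 and MMUSUFFIX defined as _mmu by the
 * including file (an assumption; see the instantiation sketch above), the
 * helper above expands to __ldl_mmu(target_ulong addr, int mmu_idx). */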

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    uintptr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(uintptr_t)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
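
/* Worked example (little endian, DATA_SIZE == 4, addr & 3 == 3, i.e. one
 * byte in the first aligned word followed by three in the next): addr1 is
 * addr rounded down to a 4-byte boundary, addr2 = addr1 + 4, shift == 24,
 * and res = (res1 >> 24) | (res2 << 8) reassembles the four bytes in
 * memory order.  Both recursive loads are aligned, so each takes the
 * single-page path above. */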

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (uintptr_t)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t ioaddr;
    uintptr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(uintptr_t)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
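
/* Note: for RAM pages that are still clean, the low bits of addr_write hold
 * IO_MEM_NOTDIRTY, so the first store takes the "IO access" branch above.
 * The not-dirty handler then marks the page dirty and invalidates any
 * translated code on it before plain RAM stores resume (standard QEMU
 * softmmu behaviour; the handler itself lives outside this file). */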

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t ioaddr;
    uintptr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(uintptr_t)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
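
/* Worked example (little endian, DATA_SIZE == 2, val == 0x1234): the loop
 * issues slow_stb(addr + 1, 0x12) and then slow_stb(addr + 0, 0x34); each
 * single-byte store is always aligned, so it takes the normal per-page
 * path even when addr + 1 crosses into the next page. */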

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#ifdef VBOX
# undef DATA_TYPE_PROMOTED
#endif
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ