VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm

Last change on this file was 100337, checked in by vboxsync, 11 months ago

VMM/VMMR0JmpA-amd64.asm: Clarified comment; use simpler instruction for getting RSP.

; $Id: VMMR0JmpA-amd64.asm 100337 2023-06-30 23:53:06Z vboxsync $
;; @file
; VMM - R0 SetJmp / LongJmp routines for AMD64.
;

;
; Copyright (C) 2006-2023 Oracle and/or its affiliates.
;
; This file is part of VirtualBox base platform packages, as
; available from https://www.virtualbox.org.
;
; This program is free software; you can redistribute it and/or
; modify it under the terms of the GNU General Public License
; as published by the Free Software Foundation, in version 3 of the
; License.
;
; This program is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; You should have received a copy of the GNU General Public License
; along with this program; if not, see <https://www.gnu.org/licenses>.
;
; SPDX-License-Identifier: GPL-3.0-only
;


;*********************************************************************************************************************************
;*  Header Files                                                                                                                  *
;*********************************************************************************************************************************
%define RT_ASM_WITH_SEH64_ALT
%include "VBox/asmdefs.mac"
%include "VMMInternal.mac"
%include "VBox/err.mac"
%include "VBox/param.mac"


BEGINCODE

40;;
41; The setjmp variant used for calling Ring-3.
42;
[20874]43; This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
[1]44; in the middle of a ring-3 call. Another differences is the function pointer and
45; argument. This has to do with resuming code and the stack frame of the caller.
46;
[20875]47; @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
[13872]48; @param pJmpBuf msc:rcx gcc:rdi x86:[esp+0x04] Our jmp_buf.
49; @param pfn msc:rdx gcc:rsi x86:[esp+0x08] The function to be called when not resuming.
50; @param pvUser1 msc:r8 gcc:rdx x86:[esp+0x0c] The argument of that function.
51; @param pvUser2 msc:r9 gcc:rcx x86:[esp+0x10] The argument of that function.
[1]52;
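;
; For orientation, the C-level shape of this entry point is roughly the
; following (a sketch only; the authoritative DECLASM prototype lives in
; VMMInternal.h and may differ in pointer types):
;
;       int vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn,
;                                void *pvUser1, void *pvUser2);
;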
GLOBALNAME vmmR0CallRing3SetJmp2
GLOBALNAME vmmR0CallRing3SetJmpEx
BEGINPROC vmmR0CallRing3SetJmp
        ;
        ; Save the registers.
        ;
        push    rbp
        SEH64_PUSH_xBP
        mov     rbp, rsp
        SEH64_SET_FRAME_xBP 0
 %ifdef ASM_CALL64_MSC
        sub     rsp, 30h                ; (10h is used by resume (??), 20h for callee spill area)
        SEH64_ALLOCATE_STACK 30h
SEH64_END_PROLOGUE
        mov     r11, rdx                ; pfn
        mov     rdx, rcx                ; pJmpBuf;
 %else
        sub     rsp, 10h                ; (10h is used by resume (??))
        SEH64_ALLOCATE_STACK 10h
SEH64_END_PROLOGUE
        mov     r8, rdx                 ; pvUser1 (save it like MSC)
        mov     r9, rcx                 ; pvUser2 (save it like MSC)
        mov     r11, rsi                ; pfn
        mov     rdx, rdi                ; pJmpBuf
 %endif
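        ; From here on xDX (rdx) points to the jump buffer in both calling
        ; conventions, r11 holds pfn, and r8/r9 hold pvUser1/pvUser2.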
        mov     [xDX + VMMR0JMPBUF.rbx], rbx
 %ifdef ASM_CALL64_MSC
        mov     [xDX + VMMR0JMPBUF.rsi], rsi
        mov     [xDX + VMMR0JMPBUF.rdi], rdi
 %endif
        mov     [xDX + VMMR0JMPBUF.rbp], rbp
        mov     [xDX + VMMR0JMPBUF.r12], r12
        mov     [xDX + VMMR0JMPBUF.r13], r13
        mov     [xDX + VMMR0JMPBUF.r14], r14
        mov     [xDX + VMMR0JMPBUF.r15], r15
        mov     xAX, [rbp + 8]          ; (not really necessary, except for validity check)
        mov     [xDX + VMMR0JMPBUF.rip], xAX
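        ; A non-zero .rip is what marks the buffer as armed; vmmR0CallRing3LongJmp
        ; checks it before jumping, and .proper_return below clears it again.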
 %ifdef ASM_CALL64_MSC
        lea     r10, [rsp + 20h]        ; Must skip the callee spill area.
 %else
        mov     r10, rsp
 %endif
        mov     [xDX + VMMR0JMPBUF.rsp], r10
 %ifdef RT_OS_WINDOWS
        movdqa  [xDX + VMMR0JMPBUF.xmm6], xmm6
        movdqa  [xDX + VMMR0JMPBUF.xmm7], xmm7
        movdqa  [xDX + VMMR0JMPBUF.xmm8], xmm8
        movdqa  [xDX + VMMR0JMPBUF.xmm9], xmm9
        movdqa  [xDX + VMMR0JMPBUF.xmm10], xmm10
        movdqa  [xDX + VMMR0JMPBUF.xmm11], xmm11
        movdqa  [xDX + VMMR0JMPBUF.xmm12], xmm12
        movdqa  [xDX + VMMR0JMPBUF.xmm13], xmm13
        movdqa  [xDX + VMMR0JMPBUF.xmm14], xmm14
        movdqa  [xDX + VMMR0JMPBUF.xmm15], xmm15
 %endif
        pushf
        pop     xAX
        mov     [xDX + VMMR0JMPBUF.rflags], xAX

112 ;
[92408]113 ; Save the call then make it.
[726]114 ;
        mov     [xDX + VMMR0JMPBUF.pfn], r11
        mov     [xDX + VMMR0JMPBUF.pvUser1], r8
        mov     [xDX + VMMR0JMPBUF.pvUser2], r9

        mov     r12, rdx                ; Save pJmpBuf.
 %ifdef ASM_CALL64_MSC
        mov     rcx, r8                 ; pvUser -> arg0
        mov     rdx, r9
 %else
        mov     rdi, r8                 ; pvUser -> arg0
        mov     rsi, r9
 %endif
        call    r11
        mov     rdx, r12                ; Restore pJmpBuf
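        ; eax now holds the status code returned by pfn; none of the register
        ; restores below touch it, so it becomes our own return value as well.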

        ;
        ; Return like in the long jump but clear rip, no shortcuts here.
        ;
.proper_return:
%ifdef RT_OS_WINDOWS
        movdqa  xmm6, [xDX + VMMR0JMPBUF.xmm6 ]
        movdqa  xmm7, [xDX + VMMR0JMPBUF.xmm7 ]
        movdqa  xmm8, [xDX + VMMR0JMPBUF.xmm8 ]
        movdqa  xmm9, [xDX + VMMR0JMPBUF.xmm9 ]
        movdqa  xmm10, [xDX + VMMR0JMPBUF.xmm10]
        movdqa  xmm11, [xDX + VMMR0JMPBUF.xmm11]
        movdqa  xmm12, [xDX + VMMR0JMPBUF.xmm12]
        movdqa  xmm13, [xDX + VMMR0JMPBUF.xmm13]
        movdqa  xmm14, [xDX + VMMR0JMPBUF.xmm14]
        movdqa  xmm15, [xDX + VMMR0JMPBUF.xmm15]
%endif
        mov     rbx, [xDX + VMMR0JMPBUF.rbx]
%ifdef ASM_CALL64_MSC
        mov     rsi, [xDX + VMMR0JMPBUF.rsi]
        mov     rdi, [xDX + VMMR0JMPBUF.rdi]
%endif
        mov     r12, [xDX + VMMR0JMPBUF.r12]
        mov     r13, [xDX + VMMR0JMPBUF.r13]
        mov     r14, [xDX + VMMR0JMPBUF.r14]
        mov     r15, [xDX + VMMR0JMPBUF.r15]
        mov     rbp, [xDX + VMMR0JMPBUF.rbp]
        and     qword [xDX + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
        mov     rsp, [xDX + VMMR0JMPBUF.rsp]
        push    qword [xDX + VMMR0JMPBUF.rflags]
        popf
        leave
        ret
ENDPROC vmmR0CallRing3SetJmp


;;
; Worker for VMMRZCallRing3.
; This will save the stack and registers.
;
; @param    pJmpBuf  msc:rcx gcc:rdi x86:[ebp+8]   Pointer to the jump buffer.
; @param    rc       msc:rdx gcc:rsi x86:[ebp+c]   The return code.
;
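;
; In C terms this is roughly (a sketch only; see VMMInternal.h for the
; actual DECLASM prototype):
;
;       int vmmR0CallRing3LongJmp(PVMMR0JMPBUF pJmpBuf, int rc);
;
; On success it never returns here; 'rc' pops out of vmmR0CallRing3SetJmp
; instead.  On failure it returns VERR_VMM_LONG_JMP_ERROR to its caller.
;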
BEGINPROC vmmR0CallRing3LongJmp
        ;
        ; Save the registers on the stack.
        ;
        push    rbp
        SEH64_PUSH_xBP
        mov     rbp, rsp
        SEH64_SET_FRAME_xBP 0
        push    r15
        SEH64_PUSH_GREG r15
        push    r14
        SEH64_PUSH_GREG r14
        push    r13
        SEH64_PUSH_GREG r13
        push    r12
        SEH64_PUSH_GREG r12
%ifdef ASM_CALL64_MSC
        push    rdi
        SEH64_PUSH_GREG rdi
        push    rsi
        SEH64_PUSH_GREG rsi
%endif
        push    rbx
        SEH64_PUSH_GREG rbx
        pushf
        SEH64_ALLOCATE_STACK 8
%ifdef RT_OS_WINDOWS
        sub     rsp, 0a0h
        SEH64_ALLOCATE_STACK 0a0h
        movdqa  [rsp + 000h], xmm6
        movdqa  [rsp + 010h], xmm7
        movdqa  [rsp + 020h], xmm8
        movdqa  [rsp + 030h], xmm9
        movdqa  [rsp + 040h], xmm10
        movdqa  [rsp + 050h], xmm11
        movdqa  [rsp + 060h], xmm12
        movdqa  [rsp + 070h], xmm13
        movdqa  [rsp + 080h], xmm14
        movdqa  [rsp + 090h], xmm15
%endif
SEH64_END_PROLOGUE

        ;
        ; Normalize the parameters.
        ;
%ifdef ASM_CALL64_MSC
        mov     eax, edx                ; rc
        mov     rdx, rcx                ; pJmpBuf
%else
        mov     rdx, rdi                ; pJmpBuf
        mov     eax, esi                ; rc
%endif

        ;
        ; Is the jump buffer armed?
        ;
        cmp     qword [xDX + VMMR0JMPBUF.rip], byte 0
        je      .nok

        ;
        ; Also check that the stack is in the vicinity of the RSP we entered
        ; on so the stack mirroring below doesn't go wild.
        ;
        mov     rsi, rsp
        mov     rcx, [xDX + VMMR0JMPBUF.rsp]
        sub     rcx, rsi
        cmp     rcx, _64K
        jnbe    .nok
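        ; Note that the unsigned compare catches both directions: if the
        ; current RSP is above the saved one, the subtraction wraps to a huge
        ; unsigned value and 'jnbe' likewise takes the failure path.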

        ;
        ; Save a PC and return PC here to assist unwinding.
        ;
.unwind_point:
        lea     rcx, [.unwind_point wrt RIP]
        mov     [xDX + VMMR0JMPBUF.UnwindPc], rcx
        mov     rcx, [xDX + VMMR0JMPBUF.rbp]
        lea     rcx, [rcx + 8]
        mov     [xDX + VMMR0JMPBUF.UnwindRetPcLocation], rcx
        mov     rcx, [rcx]
        mov     [xDX + VMMR0JMPBUF.UnwindRetPcValue], rcx

        ; Save RSP & RBP to enable stack dumps
        mov     [xDX + VMMR0JMPBUF.UnwindSp], rsp
        mov     rcx, rbp
        mov     [xDX + VMMR0JMPBUF.UnwindBp], rcx
        sub     rcx, 8
        mov     [xDX + VMMR0JMPBUF.UnwindRetSp], rcx

        ;
        ; Make sure the direction flag is clear before we do any rep movsb below.
        ;
        cld

        ;
        ; Mirror the stack.
        ;
        xor     ebx, ebx

        mov     rdi, [xDX + VMMR0JMPBUF.pvStackBuf]
        or      rdi, rdi
        jz      .skip_stack_mirroring

        mov     ebx, [xDX + VMMR0JMPBUF.cbStackBuf]
        or      ebx, ebx
        jz      .skip_stack_mirroring

        mov     rcx, [xDX + VMMR0JMPBUF.rsp]
        sub     rcx, rsp
        and     rcx, ~0fffh             ; copy up to the page boundary

        cmp     rcx, rbx                ; rbx = rcx = RT_MIN(rbx, rcx);
        jbe     .do_stack_buffer_big_enough
        mov     ecx, ebx                ; too much to copy, limit to ebx
        jmp     .do_stack_copying
.do_stack_buffer_big_enough:
        mov     ebx, ecx                ; ecx is smaller, update ebx for cbStackValid

.do_stack_copying:
        mov     rsi, rsp
        rep movsb

.skip_stack_mirroring:
        mov     [xDX + VMMR0JMPBUF.cbStackValid], ebx
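        ; In C terms the mirroring above is roughly (a sketch, not actual
        ; source code):
        ;       cbToCopy = RT_MIN(((uintptr_t)SavedRsp - rsp) & ~(uintptr_t)0xfff, cbStackBuf);
        ;       memcpy(pvStackBuf, (void const *)rsp, cbToCopy);
        ;       pJmpBuf->cbStackValid = cbToCopy;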

        ;
        ; Do buffer mirroring.
        ;
        mov     rdi, [xDX + VMMR0JMPBUF.pMirrorBuf]
        or      rdi, rdi
        jz      .skip_buffer_mirroring
        mov     rsi, rdx
        mov     ecx, VMMR0JMPBUF_size
        rep movsb
.skip_buffer_mirroring:

        ;
        ; Do the long jump.
        ;
%ifdef RT_OS_WINDOWS
        movdqa  xmm6, [xDX + VMMR0JMPBUF.xmm6 ]
        movdqa  xmm7, [xDX + VMMR0JMPBUF.xmm7 ]
        movdqa  xmm8, [xDX + VMMR0JMPBUF.xmm8 ]
        movdqa  xmm9, [xDX + VMMR0JMPBUF.xmm9 ]
        movdqa  xmm10, [xDX + VMMR0JMPBUF.xmm10]
        movdqa  xmm11, [xDX + VMMR0JMPBUF.xmm11]
        movdqa  xmm12, [xDX + VMMR0JMPBUF.xmm12]
        movdqa  xmm13, [xDX + VMMR0JMPBUF.xmm13]
        movdqa  xmm14, [xDX + VMMR0JMPBUF.xmm14]
        movdqa  xmm15, [xDX + VMMR0JMPBUF.xmm15]
%endif
        mov     rbx, [xDX + VMMR0JMPBUF.rbx]
%ifdef ASM_CALL64_MSC
        mov     rsi, [xDX + VMMR0JMPBUF.rsi]
        mov     rdi, [xDX + VMMR0JMPBUF.rdi]
%endif
        mov     r12, [xDX + VMMR0JMPBUF.r12]
        mov     r13, [xDX + VMMR0JMPBUF.r13]
        mov     r14, [xDX + VMMR0JMPBUF.r14]
        mov     r15, [xDX + VMMR0JMPBUF.r15]
        mov     rbp, [xDX + VMMR0JMPBUF.rbp]
        mov     rsp, [xDX + VMMR0JMPBUF.rsp]
        push    qword [xDX + VMMR0JMPBUF.rflags]
        popf
        leave
        ret

        ;
        ; Failure
        ;
.nok:
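        ; Reached when the buffer isn't armed or RSP is out of range: unwind
        ; our own prologue and hand the error status back to our caller.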
        mov     eax, VERR_VMM_LONG_JMP_ERROR
%ifdef RT_OS_WINDOWS
        add     rsp, 0a0h               ; skip XMM registers since they are unmodified.
%endif
        popf
        pop     rbx
%ifdef ASM_CALL64_MSC
        pop     rsi
        pop     rdi
%endif
        pop     r12
        pop     r13
        pop     r14
        pop     r15
        leave
        ret
ENDPROC vmmR0CallRing3LongJmp
