VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/win/SUPR3HardenedMainA-win.asm

Last change on this file was 98103, checked in by vboxsync, 16 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 11.9 KB
; $Id: SUPR3HardenedMainA-win.asm 98103 2023-01-17 14:15:46Z vboxsync $
;; @file
; VirtualBox Support Library - Hardened main(), Windows assembly bits.
;

;
; Copyright (C) 2012-2023 Oracle and/or its affiliates.
;
; This file is part of VirtualBox base platform packages, as
; available from https://www.virtualbox.org.
;
; This program is free software; you can redistribute it and/or
; modify it under the terms of the GNU General Public License
; as published by the Free Software Foundation, in version 3 of the
; License.
;
; This program is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; You should have received a copy of the GNU General Public License
; along with this program; if not, see <https://www.gnu.org/licenses>.
;
; The contents of this file may alternatively be used under the terms
; of the Common Development and Distribution License Version 1.0
; (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
; in the VirtualBox distribution, in which case the provisions of the
; CDDL are applicable instead of those of the GPL.
;
; You may elect to license modified versions of this file under the
; terms and conditions of either the GPL or the CDDL or both.
;
; SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%define RT_ASM_WITH_SEH64
%include "iprt/asmdefs.mac"


; External code.
extern NAME(supR3HardenedEarlyProcessInit)
extern NAME(supR3HardenedMonitor_KiUserApcDispatcher_C)
%ifndef VBOX_WITHOUT_HARDENDED_XCPT_LOGGING
extern NAME(supR3HardenedMonitor_KiUserExceptionDispatcher_C)
%endif


BEGINCODE


;;
; Alternative code for LdrInitializeThunk that performs the early process startup
; for the Stub and VM processes.
;
; This does not concern itself with any arguments on stack or in registers that
; may be passed to the LdrInitializeThunk routine, as we just save and restore
; them all before we restart the restored LdrInitializeThunk routine.
;
; @sa supR3HardenedEarlyProcessInit
;
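; Note: The C worker called below is assumed to have a prototype along the
;       lines of (illustrative sketch only, not part of the original sources):
;           uintptr_t supR3HardenedEarlyProcessInit(void);
;       i.e. it takes no arguments and returns the address at which the
;       original (restored) LdrInitializeThunk code should be resumed.
;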
BEGINPROC supR3HardenedEarlyProcessInitThunk
        ;
        ; Prologue.
        ;

        ; Reserve space for the "return" address.
        push    0

        ; Create a stack frame, saving xBP.
        push    xBP
        SEH64_PUSH_xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0           ; probably wrong...

        ; Save all volatile registers.
        push    xAX
        push    xCX
        push    xDX
%ifdef RT_ARCH_AMD64
        push    r8
        push    r9
        push    r10
        push    r11
%endif

        ; Reserve spill space and align the stack.
        sub     xSP, 20h
        and     xSP, ~0fh
        SEH64_END_PROLOGUE
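
        ;
        ; Frame layout at this point, as derived from the pushes above
        ; (illustrative summary only):
        ;   [xBP + xCB]      zero filler that will receive the resume address
        ;   [xBP]            saved xBP
        ;   [xBP - xCB*1]    saved xAX
        ;   [xBP - xCB*2]    saved xCX
        ;   [xBP - xCB*3]    saved xDX
        ;   [xBP - xCB*4..7] saved r8 thru r11 (AMD64 only)
        ;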

        ;
        ; Call the C/C++ code that does the actual work.  This returns the
        ; resume address in xAX, which we put in the "return" stack position.
        ;
        call    NAME(supR3HardenedEarlyProcessInit)
        mov     [xBP + xCB], xAX

        ;
        ; Restore volatile registers.
        ;
        mov     xAX, [xBP - xCB*1]
        mov     xCX, [xBP - xCB*2]
        mov     xDX, [xBP - xCB*3]
%ifdef RT_ARCH_AMD64
        mov     r8,  [xBP - xCB*4]
        mov     r9,  [xBP - xCB*5]
        mov     r10, [xBP - xCB*6]
        mov     r11, [xBP - xCB*7]
%endif
        ;
        ; Use the leave instruction to restore xBP and set up xSP to point at
        ; the resume address.  Then use the 'ret' instruction to resume process
        ; initialization.
        ;
        leave
        ret
ENDPROC supR3HardenedEarlyProcessInitThunk


;;
; Hook for KiUserApcDispatcher that validates user APC calls during early process
; init to prevent calls going to or referring to executable memory we've freed
; already.
;
; We just call C code here, just like supR3HardenedEarlyProcessInitThunk does.
;
; @sa supR3HardenedMonitor_KiUserApcDispatcher_C
;
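; Note: The C worker is assumed to have a prototype along the lines of
;       (illustrative sketch only, not part of the original sources):
;           uintptr_t supR3HardenedMonitor_KiUserApcDispatcher_C(void *pvApcArgs);
;       where pvApcArgs points at the dispatcher arguments / CONTEXT left on
;       the stack (see below) and the return value is the address at which
;       the original KiUserApcDispatcher code is resumed.
;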
BEGINPROC supR3HardenedMonitor_KiUserApcDispatcher
        ;
        ; Prologue.
        ;

        ; Reserve space for the "return" address.
        push    0

        ; Create a stack frame, saving xBP.
        push    xBP
        SEH64_PUSH_xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0           ; probably wrong...

        ; Save all volatile registers.
        push    xAX
        push    xCX
        push    xDX
%ifdef RT_ARCH_AMD64
        push    r8
        push    r9
        push    r10
        push    r11
%endif

        ; Reserve spill space and align the stack.
        sub     xSP, 20h
        and     xSP, ~0fh
        SEH64_END_PROLOGUE

        ;
        ; Call the C/C++ code that does the actual work.  This returns the
        ; resume address in xAX, which we put in the "return" stack position.
        ;
        ; On AMD64, a CONTEXT structure is found at our RSP address when we're called.
        ; On x86, there is a 16-byte structure containing the two routines and their
        ; arguments, followed by a CONTEXT structure.
        ;
        lea     xCX, [xBP + xCB + xCB]
%ifdef RT_ARCH_X86
        mov     [xSP], xCX
%endif
        call    NAME(supR3HardenedMonitor_KiUserApcDispatcher_C)
        mov     [xBP + xCB], xAX

        ;
        ; Restore volatile registers.
        ;
        mov     xAX, [xBP - xCB*1]
        mov     xCX, [xBP - xCB*2]
        mov     xDX, [xBP - xCB*3]
%ifdef RT_ARCH_AMD64
        mov     r8,  [xBP - xCB*4]
        mov     r9,  [xBP - xCB*5]
        mov     r10, [xBP - xCB*6]
        mov     r11, [xBP - xCB*7]
%endif
        ;
        ; Use the leave instruction to restore xBP and set up xSP to point at
        ; the resume address.  Then use the 'ret' instruction to execute the
        ; original KiUserApcDispatcher code as if we've never been here...
        ;
        leave
        ret
ENDPROC supR3HardenedMonitor_KiUserApcDispatcher


%ifndef VBOX_WITHOUT_HARDENDED_XCPT_LOGGING
;;
; Hook for KiUserExceptionDispatcher that logs exceptions.
;
; For the AMD64 variant, we're not directly intercepting the function itself, but
; patching into a Wow64 callout that's done at the very start of the routine.  RCX
; and RDX are set to PEXCEPTION_RECORD and PCONTEXT respectively and there is a
; return address.  Also, we don't need to do any return-via-copied-out-code stuff.
;
; For X86 we hook the function and have PEXCEPTION_RECORD and PCONTEXT pointers on
; the stack, but no return address.
;
; We just call C code here, just like supR3HardenedEarlyProcessInitThunk and
; supR3HardenedMonitor_KiUserApcDispatcher do.
;
; @sa supR3HardenedMonitor_KiUserExceptionDispatcher_C
;
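; Note: The C worker is assumed to have a prototype along the lines of
;       (illustrative sketch only, not part of the original sources):
;           uintptr_t supR3HardenedMonitor_KiUserExceptionDispatcher_C(PEXCEPTION_RECORD pXcptRec, PCONTEXT pCtx);
;       On x86 the return value is used as the address to resume execution at;
;       on AMD64 it is not used and the 'ret' below simply returns to the
;       address that was on the stack when the Wow64 callout was made.
;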
BEGINPROC supR3HardenedMonitor_KiUserExceptionDispatcher
        ;
        ; Prologue.
        ;

 %ifndef RT_ARCH_AMD64
        ; Reserve space for the "return" address.
        push    0
 %endif

        ; Create a stack frame, saving xBP.
        push    xBP
        SEH64_PUSH_xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0           ; probably wrong...

        ; Save all volatile registers.
        push    xAX
        push    xCX
        push    xDX
 %ifdef RT_ARCH_AMD64
        push    r8
        push    r9
        push    r10
        push    r11
 %endif

        ; Reserve spill space and align the stack.
        sub     xSP, 20h
        and     xSP, ~0fh
        SEH64_END_PROLOGUE

        ;
        ; Call the C/C++ code that does the actual work.  For x86 this returns
        ; the resume address in xAX, which we put in the "return" stack position.
        ;
        ; On both AMD64 and X86 we have two parameters on the stack that we
        ; pass along to the C code (see the function description for details).
        ;
 %ifdef RT_ARCH_X86
        mov     xCX, [xBP + xCB*2]
        mov     xDX, [xBP + xCB*3]
        mov     [xSP], xCX
        mov     [xSP+4], xDX
 %endif
        call    NAME(supR3HardenedMonitor_KiUserExceptionDispatcher_C)
 %ifdef RT_ARCH_X86
        mov     [xBP + xCB], xAX
 %endif

        ;
        ; Restore volatile registers.
        ;
        mov     xAX, [xBP - xCB*1]
        mov     xCX, [xBP - xCB*2]
        mov     xDX, [xBP - xCB*3]
 %ifdef RT_ARCH_AMD64
        mov     r8,  [xBP - xCB*4]
        mov     r9,  [xBP - xCB*5]
        mov     r10, [xBP - xCB*6]
        mov     r11, [xBP - xCB*7]
 %endif
        ;
        ; Use the leave instruction to restore xBP and set up xSP to point at
        ; the resume address.  Then use the 'ret' instruction to execute the
        ; original KiUserExceptionDispatcher code as if we've never been here...
        ;
        leave
        ret
ENDPROC supR3HardenedMonitor_KiUserExceptionDispatcher
%endif ; !VBOX_WITHOUT_HARDENDED_XCPT_LOGGING

;;
; Composes a standard call name.
%ifdef RT_ARCH_X86
 %define SUPHNTIMP_STDCALL_NAME(a,b) _ %+ a %+ @ %+ b
%else
 %define SUPHNTIMP_STDCALL_NAME(a,b) NAME(a)
%endif
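; For example, SUPHNTIMP_STDCALL_NAME(NtClose,4) yields the decorated symbol
; _NtClose@4 on x86 and the plain NAME(NtClose) on the other architectures
; (illustrative example only).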

;; Concatenates two literals.
%define SUPHNTIMP_CONCAT(a,b) a %+ b


;;
; Import data and code for an API call.
;
; @param 1    The plain API name.
; @param 2    The parameter frame size on x86.  Multiple of dword.
; @param 3    Non-zero expression if system call.
; @param 4    Non-zero expression if early available call.
;
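; An invocation like the following (illustrative example only; the real
; invocations come from the import templates included further down):
;           SupHardNtImport NtClose, 4, SUPHNTIMP_SYSCALL, 1
; emits the g_pfnNtClose import pointer, the g_uApiNoNtClose syscall number
; variable, an import stub that jumps through g_pfnNtClose, and the
; NtClose_SyscallType1/2 direct system call stubs.
;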
%define SUPHNTIMP_SYSCALL 1
%macro SupHardNtImport 4
        ;
        ; The data.
        ;
BEGINDATA
global __imp_ %+ SUPHNTIMP_STDCALL_NAME(%1,%2)  ; The import name used via dllimport.
__imp_ %+ SUPHNTIMP_STDCALL_NAME(%1,%2):
GLOBALNAME g_pfn %+ %1                          ; The name we like to refer to.
        RTCCPTR_DEF 0
%if %3
GLOBALNAME g_uApiNo %+ %1
        RTCCPTR_DEF 0
%endif

        ;
        ; The code: First a call stub.
        ;
BEGINCODE
global SUPHNTIMP_STDCALL_NAME(%1, %2)
SUPHNTIMP_STDCALL_NAME(%1, %2):
        jmp     RTCCPTR_PRE [NAME(g_pfn %+ %1) xWrtRIP]

%if %3
        ;
        ; Make system calls.
        ;
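        ; Note: the 07ffe03xxh constants used below lie inside the
        ; KUSER_SHARED_DATA page, which Windows maps at the fixed address
        ; 07ffe0000h in every user-mode process, so it can be referenced
        ; directly without imports or relocations.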
 %ifdef RT_ARCH_AMD64
BEGINPROC %1 %+ _SyscallType1
        SEH64_END_PROLOGUE
        mov     eax, [NAME(g_uApiNo %+ %1) xWrtRIP]
        mov     r10, rcx
        syscall
        ret
ENDPROC %1 %+ _SyscallType1
BEGINPROC %1 %+ _SyscallType2           ; Introduced with build 10525
        SEH64_END_PROLOGUE
        mov     eax, [NAME(g_uApiNo %+ %1) xWrtRIP]
        test    byte [07ffe0308h], 1    ; SharedUserData!Something
        mov     r10, rcx
        jnz     .int_alternative
        syscall
        ret
.int_alternative:
        int     2eh
        ret
ENDPROC %1 %+ _SyscallType2
 %else
BEGINPROC %1 %+ _SyscallType1
        mov     edx, 07ffe0300h         ; SharedUserData!SystemCallStub
        mov     eax, [NAME(g_uApiNo %+ %1) xWrtRIP]
        call    dword [edx]
        ret     %2
ENDPROC %1 %+ _SyscallType1
BEGINPROC %1 %+ _SyscallType2
        push    .return
        mov     edx, esp
        mov     eax, [NAME(g_uApiNo %+ %1) xWrtRIP]
        sysenter
        add     esp, 4
.return:
        ret     %2
ENDPROC %1 %+ _SyscallType2
 %endif
%endif

%if %4 == 0
global NAME(SUPHNTIMP_CONCAT(%1,_Early))
NAME(SUPHNTIMP_CONCAT(%1,_Early)):
        int3
 %ifdef RT_ARCH_AMD64
        ret
 %else
        ret     %2
 %endif
%endif
%endmacro

%define SUPHARNT_COMMENT(a_Comment)
%define SUPHARNT_IMPORT_SYSCALL(a_Name, a_cbParamsX86)                SupHardNtImport a_Name, a_cbParamsX86, SUPHNTIMP_SYSCALL, 1
%define SUPHARNT_IMPORT_STDCALL(a_Name, a_cbParamsX86)                SupHardNtImport a_Name, a_cbParamsX86, 0, 0
%define SUPHARNT_IMPORT_STDCALL_OPTIONAL(a_Name, a_cbParamsX86)       SUPHARNT_IMPORT_STDCALL(a_Name, a_cbParamsX86)
%define SUPHARNT_IMPORT_STDCALL_EARLY(a_Name, a_cbParamsX86)          SupHardNtImport a_Name, a_cbParamsX86, 0, 1
%define SUPHARNT_IMPORT_STDCALL_EARLY_OPTIONAL(a_Name, a_cbParamsX86) SUPHARNT_IMPORT_STDCALL_EARLY(a_Name, a_cbParamsX86)
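
; The two import templates included below are expected to consist of lines
; like the following (illustrative examples only; the real lists live in the
; template headers):
;   SUPHARNT_IMPORT_SYSCALL(NtClose, 4)
;   SUPHARNT_IMPORT_STDCALL_EARLY(GetProcAddress, 8)
; so that each listed API gets the import data and stubs emitted by
; SupHardNtImport above.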
%include "import-template-ntdll.h"
%include "import-template-kernel32.h"


;
; For simplified LdrLoadDll patching we define a special writable, readable and
; executable section of 4KB where we can put jump-back code.
;
section .rwxpg bss execute read write align=4096
GLOBALNAME g_abSupHardReadWriteExecPage
        resb 4096