VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@43667

Last change on this file since 43667 was 43387, checked in by vboxsync, 12 years ago

VMM: HM cleanup.

/* $Id: PDMAllCritSect.cpp 43387 2012-09-21 09:40:25Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3       20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0       256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC       256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter


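/*
 * A note on the state encoding used by the RTCRITSECT core (s.Core) below:
 *  - cLockers is -1 while the section is free.  The fast path claims the
 *    section by atomically changing it from -1 to 0; nested enters and
 *    threads queuing up as waiters increment it further.
 *  - cNestings is the owner's recursion depth (0 while unowned).
 *  - NativeThreadOwner is the owning thread, NIL_RTNATIVETHREAD while unowned.
 * On leave, decrementing cLockers and still seeing a value >= 0 means at
 * least one waiter is queued on the event semaphore and must be woken up.
 */
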
/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect           The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect           The critical section.
 * @param   hNativeSelf         The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    Assert(pCritSect->s.Core.cNestings == 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# else
    NOREF(pSrcPos);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}
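
/*
 * Note: pdmCritSectEnterFirst above is only reached after the caller has
 * atomically won cLockers (the -1 -> 0 compare-exchange on the fast path, or
 * incrementing it to 0 in the contended path), so the owner and nesting
 * fields can be updated without further locking.
 */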


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect           The critsect.
 * @param   hNativeSelf         The native thread handle.
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
# ifdef PDMCRITSECT_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
# else
    RTTHREAD hThreadSelf = RTThreadSelf();
# endif
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# elif defined(IN_RING3)
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef IN_RING3
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# endif

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 || IN_RING0 */
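
/*
 * Note: the 'NoResume' wait above can return VERR_INTERRUPTED when the host
 * thread is interrupted (e.g. by a signal); the loop then re-checks u32Magic
 * to detect destruction of the section and simply retries the wait.
 */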


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   rcBusy              The status code to return when we're in GC or R0
 *                              and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to CFGM variables since it doesn't make sense to spin on
     *        uniprocessor systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
         *        cli'ed pending-preemption check up front using sti w/ instruction
         *        fusing for avoiding races.  Hmm ... This is assuming the other party
         *        is actually executing code on another CPU ... which we could keep
         *        track of if we wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    NOREF(rcBusy);
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jump back to ring-3 just to
     *        get the lock, the ring-3 code will then call the kernel to do
     *        the lock wait and when the call returns it will call ring-0
     *        again and resume in setjmp style.  Not very efficient. */
# if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             *        callers not prepared for longjmp/blocking to
                             *        use PDMCritSectTryEnter. */
    {
        /*
         * Leave HM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, ????);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
            HMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
# else
    /*
     * Preemption hasn't been disabled, so we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
# endif
#endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}
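
/*
 * Summary of the enter worker above: NOP sections return immediately; a free
 * section is claimed with a single compare-exchange; recursive entry by the
 * owner just bumps the counters; otherwise we spin briefly and then either
 * block on the event semaphore (ring-3, or ring-0 when preemption and
 * interrupts are enabled) or, in RC/R0, go to ring-3 via VMMRZCallRing3
 * (rcBusy == VINF_SUCCESS) or return rcBusy to the caller.
 */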


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   rcBusy              The status code to return when we're in GC or R0
 *                              and the section is busy.  Pass VINF_SUCCESS to
 *                              acquire the critical section through a ring-3
 *                              call if necessary.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#endif
}
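
/*
 * Usage sketch (not part of this file): a device or driver that protects its
 * shared state with a PDMCRITSECT member (here the hypothetical
 * pThis->CritSect) would typically bracket accesses like this:
 *
 *     int rc = PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         ... access the shared state ...
 *         PDMCritSectLeave(&pThis->CritSect);
 *     }
 *     else
 *         ... VERR_SEM_BUSY in RC/R0: defer the work to ring-3 ...
 */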


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   rcBusy              The status code to return when we're in GC or R0
 *                              and the section is busy.  Pass VINF_SUCCESS to
 *                              acquire the critical section through a ring-3
 *                              call if necessary.
 * @param   uId                 Some kind of locking location ID.  Typically a
 *                              return address up the stack.  Optional (0).
 * @param   pszFile             The file where the lock is being acquired from.
 *                              Optional.
 * @param   iLine               The line number in that file.  Optional (0).
 * @param   pszFunction         The function where the lock is being acquired
 *                              from.  Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#endif
}


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect           The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Tries to enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect           The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#endif
}


/**
 * Tries to enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect           The critical section.
 * @param   uId                 Some kind of locking location ID.  Typically a
 *                              return address up the stack.  Optional (0).
 * @param   pszFile             The file where the lock is being acquired from.
 *                              Optional.
 * @param   iLine               The line number in that file.  Optional (0).
 * @param   pszFunction         The function where the lock is being acquired
 *                              from.  Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectTryEnter(pCritSect, NULL);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   fCallRing3          Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
    if (   rc == VINF_SUCCESS
        && fCallRing3
        && pCritSect->s.Core.pValidatorRec
        && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @returns Indication whether we really exited the critical section.
 * @retval  VINF_SUCCESS if we really exited.
 * @retval  VINF_SEM_NESTED if we only reduced the nesting count.
 * @retval  VERR_NOT_OWNER if you somehow ignore release assertions.
 *
 * @param   pCritSect           The PDM critical section to leave.
 */
VMMDECL(int) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                           ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                            pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                            pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
                           VERR_NOT_OWNER);
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return VINF_SEM_NESTED;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
# if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
# endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return VINF_SUCCESS;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */

    return VINF_SUCCESS;
}
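
/*
 * Note on the queued-leave path above: in raw-mode context, and in ring-0
 * when interrupts are disabled, the event semaphore cannot be signalled
 * safely here.  When the section cannot simply be released (somebody is
 * waiting), the unlock is marked PDMCRITSECT_FLAGS_PENDING_UNLOCK, queued in
 * the per-VCPU apQueuedCritSectsLeaves table, and completed by
 * PDMCritSectFF() once the VMCPU_FF_PDM_CRITSECT force-flag is processed.
 */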


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu               Pointer to the VMCPU.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect           The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}


/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect           The critical section.
 * @param   pVCpu               Pointer to the VMCPU.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, PVMCPU pVCpu)
{
#ifdef IN_RING3
    NOREF(pVCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    Assert(&pVCpu->CTX_SUFF(pVM)->aCpus[pVCpu->idCpu] == pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect           The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}
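
/*
 * Note: the owner itself accounts for cNestings - 1 of the cLockers count
 * (the fast-path enter leaves cLockers at 0 with cNestings at 1), so seeing
 * cLockers >= cNestings above means at least one other thread has queued up
 * as a waiter.
 */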


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect           The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect           The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}