VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@ 93725

Last change on this file since 93725 was 93725, checked in by vboxsync, 2 years ago

VMM: More arm64 adjustments. bugref:9898

1/* $Id: PDMAllCritSectRw.cpp 93725 2022-02-14 13:46:16Z vboxsync $ */
2/** @file
3 * PDM - Read/Write Critical Section, Generic.
4 */
5
6/*
7 * Copyright (C) 2009-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM_CRITSECTRW
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsectrw.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vmcc.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/assert.h>
34#ifdef IN_RING3
35# include <iprt/lockvalidator.h>
36#endif
37#if defined(IN_RING3) || defined(IN_RING0)
38# include <iprt/semaphore.h>
39# include <iprt/thread.h>
40#endif
41#ifdef IN_RING0
42# include <iprt/time.h>
43#endif
44#ifdef RT_ARCH_AMD64
45# include <iprt/x86.h>
46#endif
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52#if 0 /* unused */
53/** The number of loops to spin for shared access in ring-3. */
54#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3 20
55/** The number of loops to spin for shared access in ring-0. */
56#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0 128
57/** The number of loops to spin for shared access in the raw-mode context. */
58#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC 128
59
60/** The number of loops to spin for exclusive access in ring-3. */
61#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3 20
62/** The number of loops to spin for exclusive access in ring-0. */
63#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0 256
64/** The number of loops to spin for exclusive access in the raw-mode context. */
65#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC 256
66#endif
67
68/** Max number of write or write/read recursions. */
69#define PDM_CRITSECTRW_MAX_RECURSIONS _1M
70
71/** Skips some of the overly paranoid atomic reads and updates.
72 * Makes some assumptions about cache coherence, though not brave enough to skip
73 * the final atomic update. */
74#define PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
75
76/** For reading RTCRITSECTRWSTATE::s::u64State. */
77#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
78# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicUoReadU64(a_pu64State)
79#else
80# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicReadU64(a_pu64State)
81#endif
82
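/* A minimal sketch (not part of the original file, hence the #if 0) of how the packed
 * RTCRITSECTRWSTATE::s::u64State word read by PDMCRITSECTRW_READ_STATE decodes, using
 * the same RTCSRW_* masks and shifts this file relies on (assumed to come from
 * iprt/critsect.h). */
#if 0
DECLINLINE(void) pdmCritSectRwSketchDecodeState(uint64_t u64State, uint64_t *pcReaders,
                                                uint64_t *pcWriters, uint64_t *pcWaitingReaders,
                                                bool *pfWriteDirection)
{
    *pcReaders        = (u64State & RTCSRW_CNT_RD_MASK)      >> RTCSRW_CNT_RD_SHIFT;
    *pcWriters        = (u64State & RTCSRW_CNT_WR_MASK)      >> RTCSRW_CNT_WR_SHIFT;
    *pcWaitingReaders = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
    *pfWriteDirection = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
}
#endif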
83
84/* Undefine the automatic VBOX_STRICT API mappings. */
85#undef PDMCritSectRwEnterExcl
86#undef PDMCritSectRwTryEnterExcl
87#undef PDMCritSectRwEnterShared
88#undef PDMCritSectRwTryEnterShared
89
90
91/*********************************************************************************************************************************
92* Global Variables *
93*********************************************************************************************************************************/
94#if defined(RTASM_HAVE_CMP_WRITE_U128) && defined(RT_ARCH_AMD64)
95static int32_t g_fCmpWriteSupported = -1;
96#endif
97
98
99/*********************************************************************************************************************************
100* Internal Functions *
101*********************************************************************************************************************************/
102static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
103
104
105#ifdef RTASM_HAVE_CMP_WRITE_U128
106
107# ifdef RT_ARCH_AMD64
108/**
109 * Called once to initialize g_fCmpWriteSupported.
110 */
111DECL_NO_INLINE(static, bool) pdmCritSectRwIsCmpWriteU128SupportedSlow(void)
112{
113 bool const fCmpWriteSupported = RT_BOOL(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16);
114 ASMAtomicWriteS32(&g_fCmpWriteSupported, fCmpWriteSupported);
115 return fCmpWriteSupported;
116}
117# endif
118
119
120/**
121 * Indicates whether hardware actually supports 128-bit compare & write.
122 */
123DECL_FORCE_INLINE(bool) pdmCritSectRwIsCmpWriteU128Supported(void)
124{
125# ifdef RT_ARCH_AMD64
126 int32_t const fCmpWriteSupported = g_fCmpWriteSupported;
127 if (RT_LIKELY(fCmpWriteSupported >= 0))
128 return fCmpWriteSupported != 0;
129 return pdmCritSectRwIsCmpWriteU128SupportedSlow();
130# else
131 return true;
132# endif
133}
134
135#endif /* RTASM_HAVE_CMP_WRITE_U128 */
136
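/* A minimal sketch (not part of the original file) of how the capability check above
 * gates the lock-free fast path, mirroring pdmCritSectRwEnterExcl further down: when
 * 128-bit compare-and-write is available, the state word and the writer handle are
 * claimed in one atomic operation; otherwise only the 64-bit u64State word is updated
 * atomically.  The variables are assumed to be the ones used in pdmCritSectRwEnterExcl. */
#if 0
    if (   (u64State & ~RTCSRW_DIR_MASK) == 0        /* section idle? */
        && pdmCritSectRwIsCmpWriteU128Supported())
    {
        RTCRITSECTRWSTATE OldState, NewState;
        OldState.s.u64State      = u64State;
        OldState.s.hNativeWriter = NIL_RTNATIVETHREAD;
        NewState.s.u64State      = (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
        NewState.s.hNativeWriter = hNativeSelf;
        if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
            return VINF_SUCCESS; /* writer ownership acquired in one shot */
    }
#endif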
137/**
138 * Gets the ring-3 native thread handle of the calling thread.
139 *
140 * @returns native thread handle (ring-3).
141 * @param pVM The cross context VM structure.
142 * @param pThis The read/write critical section. This is only used in
143 * R0 and RC.
144 */
145DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PVMCC pVM, PCPDMCRITSECTRW pThis)
146{
147#ifdef IN_RING3
148 RT_NOREF(pVM, pThis);
149 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
150
151#elif defined(IN_RING0)
152 AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
153 NIL_RTNATIVETHREAD);
154 RTNATIVETHREAD hNativeSelf = GVMMR0GetRing3ThreadForSelf(pVM);
155 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
156
157#else
158# error "invalid context"
159#endif
160 return hNativeSelf;
161}
162
163
164DECL_NO_INLINE(static, int) pdmCritSectRwCorrupted(PPDMCRITSECTRW pThis, const char *pszMsg)
165{
166 ASMAtomicWriteU32(&pThis->s.Core.u32Magic, PDMCRITSECTRW_MAGIC_CORRUPT);
167 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pThis));
168 return VERR_PDM_CRITSECTRW_IPE;
169}
170
171
172
173#ifdef IN_RING3
174/**
175 * Changes the lock validator sub-class of the read/write critical section.
176 *
177 * It is recommended to make sure that nobody is using this critical section
178 * while changing the value.
179 *
180 * @returns The old sub-class. RTLOCKVAL_SUB_CLASS_INVALID is returned if the
181 * lock validator isn't compiled in or either of the parameters is
182 * invalid.
183 * @param pThis Pointer to the read/write critical section.
184 * @param uSubClass The new sub-class value.
185 */
186VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
187{
188 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
189 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
190# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
191 AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
192
193 RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
194 return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
195# else
196 NOREF(uSubClass);
197 return RTLOCKVAL_SUB_CLASS_INVALID;
198# endif
199}
200#endif /* IN_RING3 */
201
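/* A minimal usage sketch for PDMR3CritSectRwSetSubClass (hypothetical ring-3 init code,
 * not from the original file): giving two sections of the same class distinct
 * sub-classes so that taking them in a fixed order does not trip the lock validator.
 * RTLOCKVAL_SUB_CLASS_USER is assumed to be the user base constant from
 * iprt/lockvalidator.h; the section pointers are placeholders. */
#if 0
    PDMR3CritSectRwSetSubClass(pCritSectRwOuter, RTLOCKVAL_SUB_CLASS_USER + 0);
    PDMR3CritSectRwSetSubClass(pCritSectRwInner, RTLOCKVAL_SUB_CLASS_USER + 1);
#endif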
202
203/**
204 * Worker for pdmCritSectRwEnterShared returning with read-ownership of the CS.
205 */
206DECL_FORCE_INLINE(int) pdmCritSectRwEnterSharedGotIt(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
207 bool fNoVal, RTTHREAD hThreadSelf)
208{
209#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
210 if (!fNoVal)
211 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
212#else
213 RT_NOREF(pSrcPos, fNoVal, hThreadSelf);
214#endif
215
216 /* got it! */
217 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
218 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
219 return VINF_SUCCESS;
220}
221
222/**
223 * Worker for pdmCritSectRwEnterShared and pdmCritSectRwEnterSharedBailOut
224 * that decrements the wait count and maybe resets the semaphore.
225 */
226DECLINLINE(int) pdmCritSectRwEnterSharedGotItAfterWaiting(PVMCC pVM, PPDMCRITSECTRW pThis, uint64_t u64State,
227 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
228{
229 for (;;)
230 {
231 uint64_t const u64OldState = u64State;
232 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
233 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
234 AssertReturn((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT > 0,
235 pdmCritSectRwCorrupted(pThis, "Invalid read count"));
236 cWait--;
237 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
238 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
239
240 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
241 {
242 if (cWait == 0)
243 {
244 if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
245 {
246 int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
247 AssertRCReturn(rc, rc);
248 }
249 }
250 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
251 }
252
253 ASMNopPause();
254 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
255 ASMNopPause();
256
257 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
258 }
259 /* not reached */
260}
261
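/* The worker above follows the optimistic read/modify/compare-exchange pattern used
 * throughout this file.  A minimal sketch of the bare pattern (variables as in the
 * surrounding code), kept in #if 0 purely for illustration: */
#if 0
    for (;;)
    {
        uint64_t       u64State    = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
        uint64_t const u64OldState = u64State;
        /* ... derive the desired new value in u64State ... */
        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
            break;      /* nobody raced us, the update stuck */
        ASMNopPause();  /* back off briefly, then retry with a fresh snapshot */
    }
#endif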
262
263#if defined(IN_RING0) || (defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT))
264/**
265 * Worker for pdmCritSectRwEnterSharedContended that decrements both read counts
266 * and returns @a rc.
267 *
268 * @note May return VINF_SUCCESS if we race the exclusive leave function and
269 * come out on the bottom.
270 *
271 * Ring-3 only calls in a case where it is _not_ acceptable to take the
272 * lock, so even if we get the lock we'll have to leave. In the ring-0
273 * contexts, we can safely return VINF_SUCCESS in case of a race.
274 */
275DECL_NO_INLINE(static, int) pdmCritSectRwEnterSharedBailOut(PVMCC pVM, PPDMCRITSECTRW pThis, int rc,
276 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
277{
278#ifdef IN_RING0
279 uint64_t const tsStart = RTTimeNanoTS();
280 uint64_t cNsElapsed = 0;
281#endif
282 for (;;)
283 {
284 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
285 uint64_t u64OldState = u64State;
286
287 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
288 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
289 cWait--;
290
291 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
292 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));
293
294 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
295 {
296 c--;
297 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
298 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
299 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
300 return rc;
301 }
302 else
303 {
304 /*
305 * The direction changed, so we can actually get the lock now.
306 *
307 * This means that we _have_ to wait on the semaphore to be signalled
308 * so we can properly reset it. Otherwise the stuff gets out of whack,
309 * because signalling and resetting will race one another. An
310 * exception would be if we're not the last reader waiting and don't
311 * need to worry about the resetting.
312 *
313 * An option would be to do the resetting in PDMCritSectRwEnterExcl,
314 * but that would still leave a racing PDMCritSectRwEnterShared
315 * spinning hard for a little bit, which isn't great...
316 */
317 if (cWait == 0)
318 {
319# ifdef IN_RING0
320 /* Do timeout processing first to avoid redoing the above. */
321 uint32_t cMsWait;
322 if (cNsElapsed <= RT_NS_10SEC)
323 cMsWait = 32;
324 else
325 {
326 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
327 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
328 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
329 {
330 LogFunc(("%p: giving up\n", pThis));
331 return rc;
332 }
333 cMsWait = 2;
334 }
335
336 int rcWait = SUPSemEventMultiWait(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, cMsWait);
337 Log11Func(("%p: rc=%Rrc %'RU64 ns (hNativeWriter=%p u64State=%#RX64)\n", pThis, rcWait,
338 RTTimeNanoTS() - tsStart, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
339# else
340 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
341 int rcWait = SUPSemEventMultiWaitNoResume(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, RT_MS_5SEC);
342 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
343# endif
344 if (rcWait == VINF_SUCCESS)
345 {
346# ifdef IN_RING0
347 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
348# else
349 /* ring-3: Cannot return VINF_SUCCESS. */
350 Assert(RT_FAILURE_NP(rc));
351 int rc2 = pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
352 if (RT_SUCCESS(rc2))
353 rc2 = pdmCritSectRwLeaveSharedWorker(pVM, pThis, fNoVal);
354 return rc;
355# endif
356 }
357 AssertMsgReturn(rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED,
358 ("%p: rcWait=%Rrc rc=%Rrc", pThis, rcWait, rc),
359 RT_FAILURE_NP(rcWait) ? rcWait : -rcWait);
360 }
361 else
362 {
363 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
364 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
365 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
366 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
367 }
368
369# ifdef IN_RING0
370 /* Calculate the elapsed time here to avoid redoing state work. */
371 cNsElapsed = RTTimeNanoTS() - tsStart;
372# endif
373 }
374
375 ASMNopPause();
376 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
377 ASMNopPause();
378 }
379}
380#endif /* IN_RING0 || (IN_RING3 && PDMCRITSECTRW_STRICT) */
381
382
383/**
384 * Worker for pdmCritSectRwEnterShared that handles waiting for a contended CS.
385 * Caller has already added us to the read and read-wait counters.
386 */
387static int pdmCritSectRwEnterSharedContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis,
388 int rcBusy, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
389{
390 PSUPDRVSESSION const pSession = pVM->pSession;
391 SUPSEMEVENTMULTI const hEventMulti = (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead;
392# ifdef IN_RING0
393 uint64_t const tsStart = RTTimeNanoTS();
394 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
395 uint64_t cNsMaxTotal = cNsMaxTotalDef;
396 uint32_t cMsMaxOne = RT_MS_5SEC;
397 bool fNonInterruptible = false;
398# endif
399
400 for (uint32_t iLoop = 0; ; iLoop++)
401 {
402 /*
403 * Wait for the direction to switch.
404 */
405 int rc;
406# ifdef IN_RING3
407# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
408 rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
409 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
410 if (RT_FAILURE(rc))
411 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
412# else
413 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
414# endif
415# endif
416
417 for (;;)
418 {
419 /*
420 * We always wait with a timeout so we can re-check the structure sanity
421 * and not get stuck waiting on a corrupt or deleted section.
422 */
423# ifdef IN_RING3
424 rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, RT_MS_5SEC);
425# else
426 rc = !fNonInterruptible
427 ? SUPSemEventMultiWaitNoResume(pSession, hEventMulti, cMsMaxOne)
428 : SUPSemEventMultiWait(pSession, hEventMulti, cMsMaxOne);
429 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p u64State=%#RX64)\n", pThis, rc,
430 RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
431# endif
432 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
433 { /* likely */ }
434 else
435 {
436# ifdef IN_RING3
437 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
438# endif
439 return VERR_SEM_DESTROYED;
440 }
441 if (RT_LIKELY(rc == VINF_SUCCESS))
442 break;
443
444 /*
445 * Timeout and interrupted waits need careful handling in ring-0
446 * because we're cooperating with ring-3 on this critical section
447 * and thus need to make absolutely sure we won't get stuck here.
448 *
449 * The r0 interrupted case means something is pending (termination,
450 * signal, APC, debugger, whatever), so we must try our best to
451 * return to the caller and to ring-3 so it can be dealt with.
452 */
453 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
454 {
455# ifdef IN_RING0
456 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
457 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
458 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
459 ("rcTerm=%Rrc\n", rcTerm));
460 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
461 cNsMaxTotal = RT_NS_1MIN;
462
463 if (rc == VERR_TIMEOUT)
464 {
465 /* Try to get out of here with a non-VINF_SUCCESS status if
466 the thread is terminating or if the timeout has been exceeded. */
467 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrTimeout);
468 if ( rcTerm == VINF_THREAD_IS_TERMINATING
469 || cNsElapsed > cNsMaxTotal)
470 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
471 pSrcPos, fNoVal, hThreadSelf);
472 }
473 else
474 {
475 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
476 we will try non-interruptible sleep for a while to help resolve the issue
477 w/o guru'ing. */
478 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrInterrupted);
479 if ( rcTerm != VINF_THREAD_IS_TERMINATING
480 && rcBusy == VINF_SUCCESS
481 && pVCpu != NULL
482 && cNsElapsed <= cNsMaxTotal)
483 {
484 if (!fNonInterruptible)
485 {
486 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedNonInterruptibleWaits);
487 fNonInterruptible = true;
488 cMsMaxOne = 32;
489 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
490 if (cNsLeft > RT_NS_10SEC)
491 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
492 }
493 }
494 else
495 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
496 pSrcPos, fNoVal, hThreadSelf);
497 }
498# else /* IN_RING3 */
499 RT_NOREF(pVM, pVCpu, rcBusy);
500# endif /* IN_RING3 */
501 }
502 /*
503 * Any other return code is fatal.
504 */
505 else
506 {
507# ifdef IN_RING3
508 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
509# endif
510 AssertMsgFailed(("rc=%Rrc\n", rc));
511 return RT_FAILURE_NP(rc) ? rc : -rc;
512 }
513 }
514
515# ifdef IN_RING3
516 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
517# endif
518
519 /*
520 * Check the direction.
521 */
522 Assert(pThis->s.Core.fNeedReset);
523 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
524 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
525 {
526 /*
527 * Decrement the wait count and maybe reset the semaphore (if we're last).
528 */
529 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
530 }
531
532 AssertMsg(iLoop < 1,
533 ("%p: %u u64State=%#RX64 hNativeWriter=%p\n", pThis, iLoop, u64State, pThis->s.Core.u.s.hNativeWriter));
534 RTThreadYield();
535 }
536
537 /* not reached */
538}
539
540
541/**
542 * Worker that enters a read/write critical section with shared access.
543 *
544 * @returns VBox status code.
545 * @param pVM The cross context VM structure.
546 * @param pThis Pointer to the read/write critical section.
547 * @param rcBusy The busy return code for ring-0 and ring-3.
548 * @param fTryOnly Only try to enter it, don't wait.
549 * @param pSrcPos The source position. (Can be NULL.)
550 * @param fNoVal No validation records.
551 */
552static int pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
553 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
554{
555 /*
556 * Validate input.
557 */
558 AssertPtr(pThis);
559 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
560
561#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
562 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
563 if (!fTryOnly)
564 {
565 int rc9;
566 RTNATIVETHREAD hNativeWriter;
567 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
568 if (hNativeWriter != NIL_RTNATIVETHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis))
569 rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
570 else
571 rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
572 if (RT_FAILURE(rc9))
573 return rc9;
574 }
575#else
576 RTTHREAD hThreadSelf = NIL_RTTHREAD;
577#endif
578
579 /*
580 * Work the state.
581 */
582 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
583 uint64_t u64OldState = u64State;
584 for (;;)
585 {
586 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
587 {
588 /* It flows in the right direction, try to follow it before it changes. */
589 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
590 c++;
591 Assert(c < RTCSRW_CNT_MASK / 4);
592 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
593 u64State &= ~RTCSRW_CNT_RD_MASK;
594 u64State |= c << RTCSRW_CNT_RD_SHIFT;
595 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
596 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
597 }
598 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
599 {
600 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
601 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
602 u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
603 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
604 {
605 Assert(!pThis->s.Core.fNeedReset);
606 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
607 }
608 }
609 else
610 {
611 /* Is the writer perhaps doing a read recursion? */
612 RTNATIVETHREAD hNativeWriter;
613 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
614 if (hNativeWriter != NIL_RTNATIVETHREAD)
615 {
616 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
617 if (hNativeSelf == hNativeWriter)
618 {
619#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
620 if (!fNoVal)
621 {
622 int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
623 if (RT_FAILURE(rc9))
624 return rc9;
625 }
626#endif
627 uint32_t const cReads = ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
628 Assert(cReads < _16K);
629 AssertReturnStmt(cReads < PDM_CRITSECTRW_MAX_RECURSIONS, ASMAtomicDecU32(&pThis->s.Core.cWriterReads),
630 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
631 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
632 return VINF_SUCCESS; /* don't break! */
633 }
634 }
635
636 /*
637 * If we're only trying, return already.
638 */
639 if (fTryOnly)
640 {
641 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
642 return VERR_SEM_BUSY;
643 }
644
645#if defined(IN_RING3) || defined(IN_RING0)
646 /*
647 * Add ourselves to the queue and wait for the direction to change.
648 */
649 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
650 c++;
651 Assert(c < RTCSRW_CNT_MASK / 2);
652 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
653
654 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
655 cWait++;
656 Assert(cWait <= c);
657 Assert(cWait < RTCSRW_CNT_MASK / 2);
658 AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
659
660 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
661 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
662
663 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
664 {
665 /*
666 * In ring-3 it's straightforward, just optimize the RTThreadSelf() call.
667 */
668# if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
669 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
670# elif defined(IN_RING3)
671 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, RTThreadSelf());
672# else /* IN_RING0 */
673 /*
674 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
675 * account when waiting on contended locks.
676 */
677 PVMCPUCC pVCpu = VMMGetCpu(pVM);
678 if (pVCpu)
679 {
680 VMMR0EMTBLOCKCTX Ctx;
681 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
682 if (rc == VINF_SUCCESS)
683 {
684 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
685
686 rc = pdmCritSectRwEnterSharedContended(pVM, pVCpu, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
687
688 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
689 }
690 else
691 {
692 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
693 rc = pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
694 }
695 return rc;
696 }
697
698 /* Non-EMT. */
699 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
700 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
701# endif /* IN_RING0 */
702 }
703
704#else /* !IN_RING3 && !IN_RING0 */
705 /*
706 * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
707 * back to ring-3 and do it there or return rcBusy.
708 */
709# error "Unused code."
710 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
711 if (rcBusy == VINF_SUCCESS)
712 {
713 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
714 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
715 * back to ring-3. Goes for both kind of crit sects. */
716 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
717 }
718 return rcBusy;
719#endif /* !IN_RING3 && !IN_RING0 */
720 }
721
722 ASMNopPause();
723 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
724 { /* likely */ }
725 else
726 return VERR_SEM_DESTROYED;
727 ASMNopPause();
728
729 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
730 u64OldState = u64State;
731 }
732 /* not reached */
733}
734
735
736/**
737 * Enter a critical section with shared (read) access.
738 *
739 * @returns VBox status code.
740 * @retval VINF_SUCCESS on success.
741 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
742 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
743 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
744 * during the operation.
745 *
746 * @param pVM The cross context VM structure.
747 * @param pThis Pointer to the read/write critical section.
748 * @param rcBusy The status code to return when we're in RC or R0 and the
749 * section is busy. Pass VINF_SUCCESS to acquire the
750 * critical section thru a ring-3 call if necessary.
751 * @sa PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
752 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
753 * RTCritSectRwEnterShared.
754 */
755VMMDECL(int) PDMCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
756{
757#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
758 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
759#else
760 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
761 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
762#endif
763}
764
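/* A minimal caller sketch (hypothetical, not from the original file): pairing
 * PDMCritSectRwEnterShared with PDMCritSectRwLeaveShared.  Passing VERR_SEM_BUSY as
 * rcBusy makes a contended ring-0/raw-mode call fail fast instead of bouncing to
 * ring-3; pCritSectRw stands for some read/write section owned by the caller. */
#if 0
    int rcLock = PDMCritSectRwEnterShared(pVM, pCritSectRw, VERR_SEM_BUSY);
    if (rcLock == VINF_SUCCESS)
    {
        /* ... read the state guarded by the section ... */
        PDMCritSectRwLeaveShared(pVM, pCritSectRw);
    }
    else if (rcLock == VERR_SEM_BUSY)
    {
        /* contended in ring-0/raw-mode: defer the work or retry from ring-3 */
    }
#endif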
765
766/**
767 * Enter a critical section with shared (read) access.
768 *
769 * @returns VBox status code.
770 * @retval VINF_SUCCESS on success.
771 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
772 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
773 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
774 * during the operation.
775 *
776 * @param pVM The cross context VM structure.
777 * @param pThis Pointer to the read/write critical section.
778 * @param rcBusy The status code to return when we're in RC or R0 and the
779 * section is busy. Pass VINF_SUCCESS to acquire the
780 * critical section thru a ring-3 call if necessary.
781 * @param uId Where we're entering the section.
782 * @param SRC_POS The source position.
783 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
784 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
785 * RTCritSectRwEnterSharedDebug.
786 */
787VMMDECL(int) PDMCritSectRwEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
788{
789 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
790#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
791 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
792#else
793 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
794 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
795#endif
796}
797
798
799/**
800 * Try enter a critical section with shared (read) access.
801 *
802 * @returns VBox status code.
803 * @retval VINF_SUCCESS on success.
804 * @retval VERR_SEM_BUSY if the critsect was owned.
805 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
806 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
807 * during the operation.
808 *
809 * @param pVM The cross context VM structure.
810 * @param pThis Pointer to the read/write critical section.
811 * @sa PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
812 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
813 * RTCritSectRwTryEnterShared.
814 */
815VMMDECL(int) PDMCritSectRwTryEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis)
816{
817#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
818 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
819#else
820 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
821 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
822#endif
823}
824
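/* A minimal caller sketch (hypothetical): the try variant never blocks, so a
 * VERR_SEM_BUSY result simply means the optional work is skipped. */
#if 0
    if (PDMCritSectRwTryEnterShared(pVM, pCritSectRw) == VINF_SUCCESS)
    {
        /* ... optional read-only work ... */
        PDMCritSectRwLeaveShared(pVM, pCritSectRw);
    }
#endif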
825
826/**
827 * Try enter a critical section with shared (read) access.
828 *
829 * @returns VBox status code.
830 * @retval VINF_SUCCESS on success.
831 * @retval VERR_SEM_BUSY if the critsect was owned.
832 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
833 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
834 * during the operation.
835 *
836 * @param pVM The cross context VM structure.
837 * @param pThis Pointer to the read/write critical section.
838 * @param uId Where we're entering the section.
839 * @param SRC_POS The source position.
840 * @sa PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
841 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
842 * RTCritSectRwTryEnterSharedDebug.
843 */
844VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
845{
846 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
847#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
848 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
849#else
850 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
851 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
852#endif
853}
854
855
856#ifdef IN_RING3
857/**
858 * Enters a PDM read/write critical section with shared (read) access.
859 *
860 * @returns VINF_SUCCESS if entered successfully.
861 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
862 * during the operation.
863 *
864 * @param pVM The cross context VM structure.
865 * @param pThis Pointer to the read/write critical section.
866 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
867 */
868VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
869{
870 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3);
871}
872#endif
873
874
875/**
876 * Leave a critical section held with shared access.
877 *
878 * @returns VBox status code.
879 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
880 * during the operation.
881 * @param pVM The cross context VM structure.
882 * @param pThis Pointer to the read/write critical section.
883 * @param fNoVal No validation records (i.e. queued release).
884 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
885 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
886 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
887 */
888static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
889{
890 /*
891 * Validate handle.
892 */
893 AssertPtr(pThis);
894 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
895
896#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
897 NOREF(fNoVal);
898#endif
899
900 /*
901 * Check the direction and take action accordingly.
902 */
903#ifdef IN_RING0
904 PVMCPUCC pVCpu = NULL;
905#endif
906 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
907 uint64_t u64OldState = u64State;
908 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
909 {
910#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
911 if (fNoVal)
912 Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
913 else
914 {
915 int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
916 if (RT_FAILURE(rc9))
917 return rc9;
918 }
919#endif
920 for (;;)
921 {
922 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
923 AssertReturn(c > 0, VERR_NOT_OWNER);
924 c--;
925
926 if ( c > 0
927 || (u64State & RTCSRW_CNT_WR_MASK) == 0)
928 {
929 /* Don't change the direction. */
930 u64State &= ~RTCSRW_CNT_RD_MASK;
931 u64State |= c << RTCSRW_CNT_RD_SHIFT;
932 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
933 break;
934 }
935 else
936 {
937#if defined(IN_RING3) || defined(IN_RING0)
938# ifdef IN_RING0
939 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
940 if (!pVCpu)
941 pVCpu = VMMGetCpu(pVM);
942 if ( pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
943 || VMMRZCallRing3IsEnabled(pVCpu)
944 || RTSemEventIsSignalSafe()
945 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
946 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
947 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
948 )
949# endif
950 {
951 /* Reverse the direction and signal the writer threads. */
952 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
953 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
954 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
955 {
956 int rc;
957# ifdef IN_RING0
958 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
959 if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
960 {
961 VMMR0EMTBLOCKCTX Ctx;
962 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
963 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
964
965 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
966
967 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
968 }
969 else
970# endif
971 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
972 AssertRC(rc);
973 return rc;
974 }
975 }
976#endif /* IN_RING3 || IN_RING0 */
977#ifndef IN_RING3
978# ifdef IN_RING0
979 else
980# endif
981 {
982 /* Queue the exit request (ring-3). */
983# ifndef IN_RING0
984 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
985# endif
986 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
987 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
988 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves),
989 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
990 pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = pThis->s.pSelfR3;
991 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
992 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i])
993 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] & HOST_PAGE_OFFSET_MASK)
994 == ((uintptr_t)pThis & HOST_PAGE_OFFSET_MASK),
995 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i], pThis),
996 pdmCritSectRwCorrupted(pThis, "Invalid self pointer"));
997 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
998 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
999 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1000 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
1001 break;
1002 }
1003#endif
1004 }
1005
1006 ASMNopPause();
1007 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1008 { }
1009 else
1010 return VERR_SEM_DESTROYED;
1011 ASMNopPause();
1012
1013 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1014 u64OldState = u64State;
1015 }
1016 }
1017 else
1018 {
1019 /*
1020 * Write direction. Check that it's the owner calling and that it has reads to undo.
1021 */
1022 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1023 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1024
1025 RTNATIVETHREAD hNativeWriter;
1026 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1027 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1028 AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
1029#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1030 if (!fNoVal)
1031 {
1032 int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
1033 if (RT_FAILURE(rc))
1034 return rc;
1035 }
1036#endif
1037 uint32_t cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
1038 AssertReturn(cDepth < PDM_CRITSECTRW_MAX_RECURSIONS, pdmCritSectRwCorrupted(pThis, "too many writer-read recursions"));
1039 }
1040
1041 return VINF_SUCCESS;
1042}
1043
1044
1045/**
1046 * Leave a critical section held with shared access.
1047 *
1048 * @returns VBox status code.
1049 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1050 * during the operation.
1051 * @param pVM The cross context VM structure.
1052 * @param pThis Pointer to the read/write critical section.
1053 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
1054 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
1055 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
1056 */
1057VMMDECL(int) PDMCritSectRwLeaveShared(PVMCC pVM, PPDMCRITSECTRW pThis)
1058{
1059 return pdmCritSectRwLeaveSharedWorker(pVM, pThis, false /*fNoVal*/);
1060}
1061
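/* A minimal caller sketch (hypothetical) of the writer read-recursion handled by the
 * worker above: the write owner may enter the section with shared access (tracked via
 * cWriterReads) and must leave in reverse order.  PDMCritSectRwEnterExcl and
 * PDMCritSectRwLeaveExcl are the exclusive counterparts declared in
 * VBox/vmm/pdmcritsectrw.h. */
#if 0
    rc = PDMCritSectRwEnterExcl(pVM, pCritSectRw, VERR_SEM_BUSY);
    if (rc == VINF_SUCCESS)
    {
        rc = PDMCritSectRwEnterShared(pVM, pCritSectRw, VERR_SEM_BUSY); /* read recursion by the writer */
        if (rc == VINF_SUCCESS)
            PDMCritSectRwLeaveShared(pVM, pCritSectRw);                 /* undoes the recursion only */
        PDMCritSectRwLeaveExcl(pVM, pCritSectRw);
    }
#endif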
1062
1063#if defined(IN_RING3) || defined(IN_RING0)
1064/**
1065 * PDMCritSectBothFF interface.
1066 *
1067 * @param pVM The cross context VM structure.
1068 * @param pThis Pointer to the read/write critical section.
1069 */
1070void pdmCritSectRwLeaveSharedQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1071{
1072 pdmCritSectRwLeaveSharedWorker(pVM, pThis, true /*fNoVal*/);
1073}
1074#endif
1075
1076
1077/**
1078 * Worker for pdmCritSectRwEnterExcl that bails out on wait failure.
1079 *
1080 * @returns @a rc unless corrupted.
1081 * @param pThis Pointer to the read/write critical section.
1082 * @param rc The status to return.
1083 */
1084DECL_NO_INLINE(static, int) pdmCritSectRwEnterExclBailOut(PPDMCRITSECTRW pThis, int rc)
1085{
1086 /*
1087 * Decrement the counts and return the error.
1088 */
1089 for (;;)
1090 {
1091 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1092 uint64_t const u64OldState = u64State;
1093 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1094 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on bailout"));
1095 c--;
1096 u64State &= ~RTCSRW_CNT_WR_MASK;
1097 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1098 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1099 return rc;
1100
1101 ASMNopPause();
1102 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1103 ASMNopPause();
1104 }
1105}
1106
1107
1108/**
1109 * Worker for pdmCritSectRwEnterExcl that handles the red tape after we've
1110 * gotten exclusive ownership of the critical section.
1111 */
1112DECL_FORCE_INLINE(int) pdmCritSectRwEnterExclFirst(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
1113 bool fNoVal, RTTHREAD hThreadSelf)
1114{
1115 RT_NOREF(hThreadSelf, fNoVal, pSrcPos);
1116 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
1117
1118#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1119 pThis->s.Core.cWriteRecursions = 1;
1120#else
1121 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
1122#endif
1123 Assert(pThis->s.Core.cWriterReads == 0);
1124
1125#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1126 if (!fNoVal)
1127 {
1128 if (hThreadSelf == NIL_RTTHREAD)
1129 hThreadSelf = RTThreadSelfAutoAdopt();
1130 RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
1131 }
1132#endif
1133 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1134 STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);
1135 return VINF_SUCCESS;
1136}
1137
1138
1139#if defined(IN_RING3) || defined(IN_RING0)
1140/**
1141 * Worker for pdmCritSectRwEnterExcl that handles waiting when the section is
1142 * contended.
1143 */
1144static int pdmR3R0CritSectRwEnterExclContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis, RTNATIVETHREAD hNativeSelf,
1145 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, int rcBusy, RTTHREAD hThreadSelf)
1146{
1147 RT_NOREF(hThreadSelf, rcBusy, pSrcPos, fNoVal, pVCpu);
1148
1149 PSUPDRVSESSION const pSession = pVM->pSession;
1150 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pThis->s.Core.hEvtWrite;
1151# ifdef IN_RING0
1152 uint64_t const tsStart = RTTimeNanoTS();
1153 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
1154 uint64_t cNsMaxTotal = cNsMaxTotalDef;
1155 uint32_t cMsMaxOne = RT_MS_5SEC;
1156 bool fNonInterruptible = false;
1157# endif
1158
1159 for (uint32_t iLoop = 0; ; iLoop++)
1160 {
1161 /*
1162 * Wait for our turn.
1163 */
1164 int rc;
1165# ifdef IN_RING3
1166# ifdef PDMCRITSECTRW_STRICT
1167 rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
1168 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
1169 if (RT_SUCCESS(rc))
1170 { /* likely */ }
1171 else
1172 return pdmCritSectRwEnterExclBailOut(pThis, rc);
1173# else
1174 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
1175# endif
1176# endif
1177
1178 for (;;)
1179 {
1180 /*
1181 * We always wait with a timeout so we can re-check the structure sanity
1182 * and not get stuck waiting on a corrupt or deleted section.
1183 */
1184# ifdef IN_RING3
1185 rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
1186# else
1187 rc = !fNonInterruptible
1188 ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
1189 : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
1190 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p)\n",
1191 pThis, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter));
1192# endif
1193 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1194 { /* likely */ }
1195 else
1196 {
1197# ifdef IN_RING3
1198 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1199# endif
1200 return VERR_SEM_DESTROYED;
1201 }
1202 if (RT_LIKELY(rc == VINF_SUCCESS))
1203 break;
1204
1205 /*
1206 * Timeout and interrupted waits need careful handling in ring-0
1207 * because we're cooperating with ring-3 on this critical section
1208 * and thus need to make absolutely sure we won't get stuck here.
1209 *
1210 * The r0 interrupted case means something is pending (termination,
1211 * signal, APC, debugger, whatever), so we must try our best to
1212 * return to the caller and to ring-3 so it can be dealt with.
1213 */
1214 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
1215 {
1216# ifdef IN_RING0
1217 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
1218 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
1219 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
1220 ("rcTerm=%Rrc\n", rcTerm));
1221 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
1222 cNsMaxTotal = RT_NS_1MIN;
1223
1224 if (rc == VERR_TIMEOUT)
1225 {
1226 /* Try to get out of here with a non-VINF_SUCCESS status if
1227 the thread is terminating or if the timeout has been exceeded. */
1228 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrTimeout);
1229 if ( rcTerm == VINF_THREAD_IS_TERMINATING
1230 || cNsElapsed > cNsMaxTotal)
1231 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1232 }
1233 else
1234 {
1235 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
1236 we will try non-interruptible sleep for a while to help resolve the issue
1237 w/o guru'ing. */
1238 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrInterrupted);
1239 if ( rcTerm != VINF_THREAD_IS_TERMINATING
1240 && rcBusy == VINF_SUCCESS
1241 && pVCpu != NULL
1242 && cNsElapsed <= cNsMaxTotal)
1243 {
1244 if (!fNonInterruptible)
1245 {
1246 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclNonInterruptibleWaits);
1247 fNonInterruptible = true;
1248 cMsMaxOne = 32;
1249 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
1250 if (cNsLeft > RT_NS_10SEC)
1251 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
1252 }
1253 }
1254 else
1255 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1256 }
1257# else /* IN_RING3 */
1258 RT_NOREF(pVM, pVCpu, rcBusy);
1259# endif /* IN_RING3 */
1260 }
1261 /*
1262 * Any other return code is fatal.
1263 */
1264 else
1265 {
1266# ifdef IN_RING3
1267 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1268# endif
1269 AssertMsgFailed(("rc=%Rrc\n", rc));
1270 return RT_FAILURE_NP(rc) ? rc : -rc;
1271 }
1272 }
1273
1274# ifdef IN_RING3
1275 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1276# endif
1277
1278 /*
1279 * Try take exclusive write ownership.
1280 */
1281 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1282 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
1283 {
1284 bool fDone;
1285 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1286 if (fDone)
1287 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1288 }
1289 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
1290 }
1291}
1292#endif /* IN_RING3 || IN_RING0 */
1293
1294
1295/**
1296 * Worker that enters a read/write critical section with exclusive access.
1297 *
1298 * @returns VBox status code.
1299 * @param pVM The cross context VM structure.
1300 * @param pThis Pointer to the read/write critical section.
1301 * @param rcBusy The busy return code for ring-0 and ring-3.
1302 * @param fTryOnly Only try to enter it, don't wait.
1303 * @param pSrcPos The source position. (Can be NULL.)
1304 * @param fNoVal No validation records.
1305 */
1306static int pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
1307 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
1308{
1309 /*
1310 * Validate input.
1311 */
1312 AssertPtr(pThis);
1313 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1314
1315 RTTHREAD hThreadSelf = NIL_RTTHREAD;
1316#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1317 if (!fTryOnly)
1318 {
1319 hThreadSelf = RTThreadSelfAutoAdopt();
1320 int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
1321 if (RT_FAILURE(rc9))
1322 return rc9;
1323 }
1324#endif
1325
1326 /*
1327 * Check if we're already the owner and just recursing.
1328 */
1329 RTNATIVETHREAD const hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1330 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1331 RTNATIVETHREAD hNativeWriter;
1332 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1333 if (hNativeSelf == hNativeWriter)
1334 {
1335 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
1336#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1337 if (!fNoVal)
1338 {
1339 int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
1340 if (RT_FAILURE(rc9))
1341 return rc9;
1342 }
1343#endif
1344 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1345#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1346 uint32_t const cDepth = ++pThis->s.Core.cWriteRecursions;
1347#else
1348 uint32_t const cDepth = ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
1349#endif
1350 AssertReturnStmt(cDepth > 1 && cDepth <= PDM_CRITSECTRW_MAX_RECURSIONS,
1351 ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions),
1352 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
1353 return VINF_SUCCESS;
1354 }
1355
1356 /*
1357 * First we try to grab an idle critical section using 128-bit atomics.
1358 */
1359 /** @todo This could be moved up before the recursion check. */
1360 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1361#ifdef RTASM_HAVE_CMP_WRITE_U128
1362 if ( (u64State & ~RTCSRW_DIR_MASK) == 0
1363 && pdmCritSectRwIsCmpWriteU128Supported())
1364 {
1365 RTCRITSECTRWSTATE OldState;
1366 OldState.s.u64State = u64State;
1367 OldState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1368 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1369
1370 RTCRITSECTRWSTATE NewState;
1371 NewState.s.u64State = (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1372 NewState.s.hNativeWriter = hNativeSelf;
1373
1374 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1375 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1376
1377 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1378 }
1379#endif
1380
1381 /*
1382 * Do it step by step. Update the state to reflect our desire.
1383 */
1384 uint64_t u64OldState = u64State;
1385
1386 for (;;)
1387 {
1388 if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1389 || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
1390 {
1391 /* It flows in the right direction, try to follow it before it changes. */
1392 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1393 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1394 c++;
1395 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1396 u64State &= ~RTCSRW_CNT_WR_MASK;
1397 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1398 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1399 break;
1400 }
1401 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
1402 {
1403 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
1404 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1405 u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1406 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1407 break;
1408 }
1409 else if (fTryOnly)
1410 {
1411 /* Wrong direction and we're not supposed to wait, just return. */
1412 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1413 return VERR_SEM_BUSY;
1414 }
1415 else
1416 {
1417 /* Add ourselves to the write count and break out to do the wait. */
1418 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1419 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1420 c++;
1421 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1422 u64State &= ~RTCSRW_CNT_WR_MASK;
1423 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1424 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1425 break;
1426 }
1427
1428 ASMNopPause();
1429
1430 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1431 { /* likely */ }
1432 else
1433 return VERR_SEM_DESTROYED;
1434
1435 ASMNopPause();
1436 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1437 u64OldState = u64State;
1438 }
1439
1440 /*
1441 * If we're in write mode now try to grab the ownership. Play fair if there
1442 * are threads already waiting.
1443 */
1444 bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1445 && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
1446 || fTryOnly);
1447 if (fDone)
1448 {
1449 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1450 if (fDone)
1451 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1452 }
1453
1454 /*
1455 * Okay, we have contention and will have to wait unless we're just trying.
1456 */
1457 if (fTryOnly)
1458 {
1459 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl)); /** @todo different statistics for this */
1460 return pdmCritSectRwEnterExclBailOut(pThis, VERR_SEM_BUSY);
1461 }
1462
1463 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1464
1465 /*
1466 * Ring-3 is pretty straightforward.
1467 */
1468#if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
1469 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, hThreadSelf);
1470#elif defined(IN_RING3)
1471 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, RTThreadSelf());
1472
1473#elif defined(IN_RING0)
1474 /*
1475 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
1476 * account when waiting on contended locks.
1477 */
1478 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1479 if (pVCpu)
1480 {
1481 VMMR0EMTBLOCKCTX Ctx;
1482 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
1483 if (rc == VINF_SUCCESS)
1484 {
1485 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1486
1487 rc = pdmR3R0CritSectRwEnterExclContended(pVM, pVCpu, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1488
1489 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1490 }
1491 else
1492 {
1493 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
1494 rc = pdmCritSectRwEnterExclBailOut(pThis, rc);
1495 }
1496 return rc;
1497 }
1498
1499 /* Non-EMT. */
1500 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1501 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1502
1503#else
1504# error "Unused."
1505 /*
1506 * Raw-mode: Call host and take it there if rcBusy is VINF_SUCCESS.
1507 */
1508 rcBusy = pdmCritSectRwEnterExclBailOut(pThis, rcBusy);
1509 if (rcBusy == VINF_SUCCESS)
1510 {
1511 Assert(!fTryOnly);
1512 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1513 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
1514 * back to ring-3. Goes for both kind of crit sects. */
1515 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
1516 }
1517 return rcBusy;
1518#endif
1519}
1520
1521
1522/**
1523 * Enter a critical section with exclusive (write) access.
1524 *
1525 * @returns VBox status code.
1526 * @retval VINF_SUCCESS on success.
1527 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1528 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1529 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1530 * during the operation.
1531 *
1532 * @param pVM The cross context VM structure.
1533 * @param pThis Pointer to the read/write critical section.
1534 * @param rcBusy The status code to return when we're in RC or R0 and the
1535 * section is busy. Pass VINF_SUCCESS to acquire the
1536 * critical section through a ring-3 call if necessary.
1537 * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
1538 * PDMCritSectRwTryEnterExclDebug,
1539 * PDMCritSectEnterDebug, PDMCritSectEnter,
1540 * RTCritSectRwEnterExcl.
1541 */
1542VMMDECL(int) PDMCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
1543{
1544#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1545 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, NULL, false /*fNoVal*/);
1546#else
1547 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1548 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1549#endif
1550}
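/*
 * Illustrative usage sketch: the common exclusive enter/leave pairing around
 * an update of state shared between threads.  MYDEVSTATE, its CritSectRw
 * member and its uSharedValue field are hypothetical placeholders.
 *
 * @code
 *      static void myDevSetSharedValue(PVMCC pVM, PMYDEVSTATE pMyDev, uint32_t uNew)
 *      {
 *          int rc = PDMCritSectRwEnterExcl(pVM, &pMyDev->CritSectRw, VERR_SEM_BUSY);
 *          if (rc == VINF_SUCCESS)
 *          {
 *              pMyDev->uSharedValue = uNew;    // exclusive (write) access to the shared state
 *              PDMCritSectRwLeaveExcl(pVM, &pMyDev->CritSectRw);
 *          }
 *          else
 *          {
 *              // rc is VERR_SEM_BUSY (the rcBusy we passed; ring-0/raw-mode only)
 *              // or VERR_SEM_DESTROYED; propagate or handle as appropriate.
 *          }
 *      }
 * @endcode
 */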
1551
1552
1553/**
1554 * Enters a critical section with exclusive (write) access.
1555 *
1556 * @returns VBox status code.
1557 * @retval VINF_SUCCESS on success.
1558 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1559 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1560 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1561 * during the operation.
1562 *
1563 * @param pVM The cross context VM structure.
1564 * @param pThis Pointer to the read/write critical section.
1565 * @param rcBusy The status code to return when we're in RC or R0 and the
1566 * section is busy. Pass VINF_SUCCESS to acquire the
1567 * critical section through a ring-3 call if necessary.
1568 * @param uId Where we're entering the section.
1569 * @param SRC_POS The source position.
1570 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
1571 * PDMCritSectRwTryEnterExclDebug,
1572 * PDMCritSectEnterDebug, PDMCritSectEnter,
1573 * RTCritSectRwEnterExclDebug.
1574 */
1575VMMDECL(int) PDMCritSectRwEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1576{
1577 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1578#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1579 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, NULL, false /*fNoVal*/);
1580#else
1581 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1582 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1583#endif
1584}
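/*
 * Illustrative sketch: the debug variant additionally takes an uId and the
 * RT_SRC_POS expansion so strict (PDMCRITSECTRW_STRICT) ring-3 builds can feed
 * the lock validator with the enter position.  pMyCritSectRw is a hypothetical
 * PPDMCRITSECTRW.
 *
 * @code
 *      // uId = 0: no special identifier for this call site.
 *      int rc = PDMCritSectRwEnterExclDebug(pVM, pMyCritSectRw, VERR_SEM_BUSY, 0, RT_SRC_POS);
 *      if (rc == VINF_SUCCESS)
 *          PDMCritSectRwLeaveExcl(pVM, pMyCritSectRw);
 * @endcode
 */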
1585
1586
1587/**
1588 * Tries to enter a critical section with exclusive (write) access.
1589 *
1590 * @retval VINF_SUCCESS on success.
1591 * @retval VERR_SEM_BUSY if the critsect was owned.
1592 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1593 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1594 * during the operation.
1595 *
1596 * @param pVM The cross context VM structure.
1597 * @param pThis Pointer to the read/write critical section.
1598 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
1599 * PDMCritSectRwEnterExclDebug,
1600 * PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
1601 * RTCritSectRwTryEnterExcl.
1602 */
1603VMMDECL(int) PDMCritSectRwTryEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1604{
1605#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1606 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, NULL, false /*fNoVal*/);
1607#else
1608 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1609 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1610#endif
1611}
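/*
 * Illustrative sketch: the try variant never blocks, which makes it suitable
 * for paths that can simply skip the work or retry later when the section is
 * contended.  pMyCritSectRw is a hypothetical PPDMCRITSECTRW.
 *
 * @code
 *      int rc = PDMCritSectRwTryEnterExcl(pVM, pMyCritSectRw);
 *      if (rc == VINF_SUCCESS)
 *      {
 *          // ... update the protected state ...
 *          PDMCritSectRwLeaveExcl(pVM, pMyCritSectRw);
 *      }
 *      // else: typically VERR_SEM_BUSY; do the fallback work and try again later.
 * @endcode
 */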
1612
1613
1614/**
1615 * Tries to enter a critical section with exclusive (write) access.
1616 *
1617 * @retval VINF_SUCCESS on success.
1618 * @retval VERR_SEM_BUSY if the critsect was owned.
1619 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1620 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1621 * during the operation.
1622 *
1623 * @param pVM The cross context VM structure.
1624 * @param pThis Pointer to the read/write critical section.
1625 * @param uId Where we're entering the section.
1626 * @param SRC_POS The source position.
1627 * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
1628 * PDMCritSectRwEnterExclDebug,
1629 * PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
1630 * RTCritSectRwTryEnterExclDebug.
1631 */
1632VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1633{
1634 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1635#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1636 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, NULL, false /*fNoVal*/);
1637#else
1638 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1639 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1640#endif
1641}
1642
1643
1644#ifdef IN_RING3
1645/**
1646 * Enters a PDM read/write critical section with exclusive (write) access.
1647 *
1648 * @returns VINF_SUCCESS if entered successfully.
1649 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1650 * during the operation.
1651 *
1652 * @param pVM The cross context VM structure.
1653 * @param pThis Pointer to the read/write critical section.
1654 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
1655 */
1656VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
1657{
1658 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, false /*fTryAgain*/, NULL, fCallRing3 /*fNoVal*/);
1659}
1660#endif /* IN_RING3 */
1661
1662
1663/**
1664 * Leaves a critical section held exclusively.
1665 *
1666 * @returns VBox status code.
1667 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1668 * during the operation.
1669 * @param pVM The cross context VM structure.
1670 * @param pThis Pointer to the read/write critical section.
1671 * @param fNoVal No validation records (i.e. queued release).
1672 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1673 */
1674static int pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
1675{
1676 /*
1677 * Validate handle.
1678 */
1679 AssertPtr(pThis);
1680 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1681
1682#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1683 NOREF(fNoVal);
1684#endif
1685
1686 /*
1687 * Check ownership.
1688 */
1689 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1690 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1691
1692 RTNATIVETHREAD hNativeWriter;
1693 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1694 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1695
1696
1697 /*
1698 * Unwind one recursion. Not the last?
1699 */
1700 if (pThis->s.Core.cWriteRecursions != 1)
1701 {
1702#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1703 if (fNoVal)
1704 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1705 else
1706 {
1707 int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
1708 if (RT_FAILURE(rc9))
1709 return rc9;
1710 }
1711#endif
1712#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1713 uint32_t const cDepth = --pThis->s.Core.cWriteRecursions;
1714#else
1715 uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
1716#endif
1717 AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis, "Invalid write recursion value on leave"));
1718 return VINF_SUCCESS;
1719 }
1720
1721
1722 /*
1723 * Final recursion.
1724 */
1725 AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
1726#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1727 if (fNoVal)
1728 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1729 else
1730 {
1731 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
1732 if (RT_FAILURE(rc9))
1733 return rc9;
1734 }
1735#endif
1736
1737
1738#ifdef RTASM_HAVE_CMP_WRITE_U128
1739 /*
1740 * See if we can get out w/o any signalling as this is a common case.
1741 */
1742 if (pdmCritSectRwIsCmpWriteU128Supported())
1743 {
1744 RTCRITSECTRWSTATE OldState;
1745 OldState.s.u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1746 if (OldState.s.u64State == ((UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)))
1747 {
1748 OldState.s.hNativeWriter = hNativeSelf;
1749 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1750
1751 RTCRITSECTRWSTATE NewState;
1752 NewState.s.u64State = RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
1753 NewState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1754
1755# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1756 pThis->s.Core.cWriteRecursions = 0;
1757# else
1758 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1759# endif
1760 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1761
1762 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1763 return VINF_SUCCESS;
1764
1765 /* bail out. */
1766 pThis->s.Core.cWriteRecursions = 1;
1767 }
1768 }
1769#endif /* RTASM_HAVE_CMP_WRITE_U128 */
1770
1771
1772#if defined(IN_RING3) || defined(IN_RING0)
1773 /*
1774 * Ring-3: Straightforward, just update the state and signal waiters if necessary.
1775 * Ring-0: Try leave for real, depends on host and context.
1776 */
1777# ifdef IN_RING0
1778 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
1779 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1780 if ( pVCpu == NULL /* non-EMT access; if we implement it, it must be able to block */
1781 || VMMRZCallRing3IsEnabled(pVCpu)
1782 || RTSemEventIsSignalSafe()
1783 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
1784 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
1785 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
1786 )
1787# endif
1788 {
1789# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1790 pThis->s.Core.cWriteRecursions = 0;
1791# else
1792 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1793# endif
1794 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1795 ASMAtomicWriteHandle(&pThis->s.Core.u.s.hNativeWriter, NIL_RTNATIVETHREAD);
1796
1797 for (;;)
1798 {
1799 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1800 uint64_t u64OldState = u64State;
1801
1802 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1803 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
1804 c--;
1805
1806 if ( c > 0
1807 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
1808 {
1809 /*
1810 * Don't change the direction, wake up the next writer if any.
1811 */
1812 u64State &= ~RTCSRW_CNT_WR_MASK;
1813 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1814 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1815 {
1816 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1817 int rc;
1818 if (c == 0)
1819 rc = VINF_SUCCESS;
1820# ifdef IN_RING0
1821 else if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
1822 {
1823 VMMR0EMTBLOCKCTX Ctx;
1824 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1825 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1826
1827 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1828
1829 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1830 }
1831# endif
1832 else
1833 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1834 AssertRC(rc);
1835 return rc;
1836 }
1837 }
1838 else
1839 {
1840 /*
1841 * Reverse the direction and signal the reader threads.
1842 */
1843 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1844 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1845 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1846 {
1847 Assert(!pThis->s.Core.fNeedReset);
1848 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
1849 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1850
1851 int rc;
1852# ifdef IN_RING0
1853 if (!RTSemEventMultiIsSignalSafe() && pVCpu != NULL)
1854 {
1855 VMMR0EMTBLOCKCTX Ctx;
1856 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1857 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1858
1859 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1860
1861 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1862 }
1863 else
1864# endif
1865 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1866 AssertRC(rc);
1867 return rc;
1868 }
1869 }
1870
1871 ASMNopPause();
1872 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1873 { /*likely*/ }
1874 else
1875 return VERR_SEM_DESTROYED;
1876 ASMNopPause();
1877 }
1878 /* not reached! */
1879 }
1880#endif /* IN_RING3 || IN_RING0 */
1881
1882
1883#ifndef IN_RING3
1884 /*
1885 * Queue the requested exit for ring-3 execution.
1886 */
1887# ifndef IN_RING0
1888 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1889# endif
1890 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
1891 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
1892 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves),
1893 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
1894 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = pThis->s.pSelfR3;
1895 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1896 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i])
1897 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] & HOST_PAGE_OFFSET_MASK)
1898 == ((uintptr_t)pThis & HOST_PAGE_OFFSET_MASK),
1899 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i], pThis),
1900 pdmCritSectRwCorrupted(pThis, "Invalid self pointer on queue (excl)"));
1901 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1902 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1903 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1904 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
1905 return VINF_SUCCESS;
1906#endif
1907}
1908
1909
1910/**
1911 * Leaves a critical section held exclusively.
1912 *
1913 * @returns VBox status code.
1914 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1915 * during the operation.
1916 * @param pVM The cross context VM structure.
1917 * @param pThis Pointer to the read/write critical section.
1918 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1919 */
1920VMMDECL(int) PDMCritSectRwLeaveExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1921{
1922 return pdmCritSectRwLeaveExclWorker(pVM, pThis, false /*fNoVal*/);
1923}
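/*
 * Illustrative sketch: exclusive ownership is recursive (up to
 * PDM_CRITSECTRW_MAX_RECURSIONS), so every successful enter must be matched by
 * exactly one leave; only the final leave clears the writer and wakes waiters.
 * pMyCritSectRw is a hypothetical PPDMCRITSECTRW.
 *
 * @code
 *      if (PDMCritSectRwEnterExcl(pVM, pMyCritSectRw, VERR_SEM_BUSY) == VINF_SUCCESS)
 *      {
 *          if (PDMCritSectRwEnterExcl(pVM, pMyCritSectRw, VERR_SEM_BUSY) == VINF_SUCCESS) // recurse
 *              PDMCritSectRwLeaveExcl(pVM, pMyCritSectRw);     // unwinds one recursion
 *          PDMCritSectRwLeaveExcl(pVM, pMyCritSectRw);         // final leave, releases the section
 *      }
 * @endcode
 */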
1924
1925
1926#if defined(IN_RING3) || defined(IN_RING0)
1927/**
1928 * PDMCritSectBothFF interface.
1929 *
1930 * @param pVM The cross context VM structure.
1931 * @param pThis Pointer to the read/write critical section.
1932 */
1933void pdmCritSectRwLeaveExclQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1934{
1935 pdmCritSectRwLeaveExclWorker(pVM, pThis, true /*fNoVal*/);
1936}
1937#endif
1938
1939
1940/**
1941 * Checks whether the caller is the exclusive (write) owner of the critical section.
1942 *
1943 * @retval true if owner.
1944 * @retval false if not owner.
1945 * @param pVM The cross context VM structure.
1946 * @param pThis Pointer to the read/write critical section.
1947 * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
1948 * RTCritSectRwIsWriteOwner.
1949 */
1950VMMDECL(bool) PDMCritSectRwIsWriteOwner(PVMCC pVM, PPDMCRITSECTRW pThis)
1951{
1952 /*
1953 * Validate handle.
1954 */
1955 AssertPtr(pThis);
1956 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1957
1958 /*
1959 * Check ownership.
1960 */
1961 RTNATIVETHREAD hNativeWriter;
1962 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1963 if (hNativeWriter == NIL_RTNATIVETHREAD)
1964 return false;
1965 return hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
1966}
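/*
 * Illustrative sketch: this predicate is mostly used to assert locking
 * preconditions in helpers that expect their caller to already hold the
 * section exclusively.  pMyCritSectRw is a hypothetical PPDMCRITSECTRW.
 *
 * @code
 *      AssertMsg(PDMCritSectRwIsWriteOwner(pVM, pMyCritSectRw),
 *                ("expected the caller to hold the write lock\n"));
 * @endcode
 */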
1967
1968
1969/**
1970 * Checks if the caller is one of the read owners of the critical section.
1971 *
1972 * @note !CAUTION! This API doesn't work reliably if lock validation isn't
1973 * enabled. Meaning, the answer is not trustworthy unless
1974 * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
1975 * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
1976 * creating the semaphore. And finally, if you used a locking class,
1977 * don't disable deadlock detection by setting cMsMinDeadlock to
1978 * RT_INDEFINITE_WAIT.
1979 *
1980 * In short, only use this for assertions.
1981 *
1982 * @returns @c true if reader, @c false if not.
1983 * @param pVM The cross context VM structure.
1984 * @param pThis Pointer to the read/write critical section.
1985 * @param fWannaHear What you'd like to hear when lock validation is not
1986 * available. (For avoiding asserting all over the place.)
1987 * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
1988 */
1989VMMDECL(bool) PDMCritSectRwIsReadOwner(PVMCC pVM, PPDMCRITSECTRW pThis, bool fWannaHear)
1990{
1991 /*
1992 * Validate handle.
1993 */
1994 AssertPtr(pThis);
1995 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1996
1997 /*
1998 * Inspect the state.
1999 */
2000 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
2001 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
2002 {
2003 /*
2004 * It's in write mode, so we can only be a reader if we're also the
2005 * current writer.
2006 */
2007 RTNATIVETHREAD hWriter;
2008 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hWriter);
2009 if (hWriter == NIL_RTNATIVETHREAD)
2010 return false;
2011 return hWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
2012 }
2013
2014 /*
2015 * Read mode. If there are no current readers, then we cannot be a reader.
2016 */
2017 if (!(u64State & RTCSRW_CNT_RD_MASK))
2018 return false;
2019
2020#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
2021 /*
2022 * Ask the lock validator.
2023 * Note! It doesn't know everything, let's deal with that if it becomes an issue...
2024 */
2025 NOREF(fWannaHear);
2026 return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
2027#else
2028 /*
2029 * OK, we don't know, so just tell the caller what they want to hear.
2030 */
2031 return fWannaHear;
2032#endif
2033}
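/*
 * Illustrative sketch: as the note above spells out, the answer is only
 * trustworthy in lock-validation builds, so keep this to assertions and use
 * fWannaHear to pick the benign answer for other builds.  pMyCritSectRw is a
 * hypothetical PPDMCRITSECTRW.
 *
 * @code
 *      Assert(PDMCritSectRwIsReadOwner(pVM, pMyCritSectRw, true)); // fWannaHear=true: don't assert w/o validation
 * @endcode
 */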
2034
2035
2036/**
2037 * Gets the write recursion count.
2038 *
2039 * @returns The write recursion count (0 if bad critsect).
2040 * @param pThis Pointer to the read/write critical section.
2041 * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
2042 * RTCritSectRwGetWriteRecursion.
2043 */
2044VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
2045{
2046 /*
2047 * Validate handle.
2048 */
2049 AssertPtr(pThis);
2050 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2051
2052 /*
2053 * Return the requested data.
2054 */
2055 return pThis->s.Core.cWriteRecursions;
2056}
2057
2058
2059/**
2060 * Gets the read recursion count of the current writer.
2061 *
2062 * @returns The read recursion count (0 if bad critsect).
2063 * @param pThis Pointer to the read/write critical section.
2064 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
2065 * RTCritSectRwGetWriterReadRecursion.
2066 */
2067VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
2068{
2069 /*
2070 * Validate handle.
2071 */
2072 AssertPtr(pThis);
2073 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2074
2075 /*
2076 * Return the requested data.
2077 */
2078 return pThis->s.Core.cWriterReads;
2079}
2080
2081
2082/**
2083 * Gets the current number of reads.
2084 *
2085 * This includes all read recursions, so it might be higher than the number of
2086 * read owners. It does not include reads done by the current writer.
2087 *
2088 * @returns The read count (0 if bad critsect).
2089 * @param pThis Pointer to the read/write critical section.
2090 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
2091 * RTCritSectRwGetReadCount.
2092 */
2093VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
2094{
2095 /*
2096 * Validate input.
2097 */
2098 AssertPtr(pThis);
2099 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2100
2101 /*
2102 * Return the requested data.
2103 */
2104 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
2105 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
2106 return 0;
2107 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
2108}
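/*
 * Illustrative sketch: the three counter getters above are informational and
 * mainly useful for logging or debug dumps.  pMyCritSectRw is a hypothetical
 * PPDMCRITSECTRW.
 *
 * @code
 *      LogRel(("critsect-rw: %u readers, write depth %u, writer read depth %u\n",
 *              PDMCritSectRwGetReadCount(pMyCritSectRw),
 *              PDMCritSectRwGetWriteRecursion(pMyCritSectRw),
 *              PDMCritSectRwGetWriterReadRecursion(pMyCritSectRw)));
 * @endcode
 */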
2109
2110
2111/**
2112 * Checks if the read/write critical section is initialized or not.
2113 *
2114 * @retval true if initialized.
2115 * @retval false if not initialized.
2116 * @param pThis Pointer to the read/write critical section.
2117 * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
2118 */
2119VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
2120{
2121 AssertPtr(pThis);
2122 return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
2123}
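/*
 * Illustrative sketch: handy for guarding teardown paths that may run before
 * the section was ever set up.  pMyCritSectRw is a hypothetical PPDMCRITSECTRW,
 * and PDMR3CritSectRwDelete is assumed to be the matching ring-3 delete API.
 *
 * @code
 *      if (PDMCritSectRwIsInitialized(pMyCritSectRw))
 *          PDMR3CritSectRwDelete(pVM, pMyCritSectRw);
 * @endcode
 */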
2124