VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@ 96860

Last change on this file since 96860 was 96407, checked in by vboxsync, 22 months ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 84.1 KB
[25426]1/* $Id: PDMAllCritSectRw.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
[45110]3 * IPRT - Read/Write Critical Section, Generic.
[25426]4 */
5
6/*
[96407]7 * Copyright (C) 2009-2022 Oracle and/or its affiliates.
[25426]8 *
[96407]9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
[25426]26 */
27
28
[57358]29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
[90677]32#define LOG_GROUP LOG_GROUP_PDM_CRITSECTRW
[45152]33#include "PDMInternal.h"
34#include <VBox/vmm/pdmcritsectrw.h>
35#include <VBox/vmm/mm.h>
36#include <VBox/vmm/vmm.h>
[80268]37#include <VBox/vmm/vmcc.h>
[45152]38#include <VBox/err.h>
39#include <VBox/vmm/hm.h>
[25426]40
[45152]41#include <VBox/log.h>
[25426]42#include <iprt/asm.h>
43#include <iprt/assert.h>
[45152]44#ifdef IN_RING3
45# include <iprt/lockvalidator.h>
46#endif
47#if defined(IN_RING3) || defined(IN_RING0)
[90659]48# include <iprt/semaphore.h>
[45152]49# include <iprt/thread.h>
50#endif
[90634]51#ifdef IN_RING0
52# include <iprt/time.h>
53#endif
[90650]54#ifdef RT_ARCH_AMD64
55# include <iprt/x86.h>
56#endif
[25426]57
58
[57358]59/*********************************************************************************************************************************
60* Defined Constants And Macros *
61*********************************************************************************************************************************/
[90673]62#if 0 /* unused */
[45152]63/** The number of loops to spin for shared access in ring-3. */
[90486]64#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3 20
[45152]65/** The number of loops to spin for shared access in ring-0. */
[90486]66#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0 128
[45152]67/** The number of loops to spin for shared access in the raw-mode context. */
[90486]68#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC 128
[25431]69
[45152]70/** The number of loops to spin for exclusive access in ring-3. */
[90486]71#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3 20
[45152]72/** The number of loops to spin for exclusive access in ring-0. */
[90486]73#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0 256
[45152]74/** The number of loops to spin for exclusive access in the raw-mode context. */
[90486]75#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC 256
[90673]76#endif
[25431]77
[90486]78/** Max number of write or write/read recursions. */
79#define PDM_CRITSECTRW_MAX_RECURSIONS _1M
[25549]80
[90654]81/** Skips some of the overly paranoid atomic reads and updates.
82 * Makes some assumptions about cache coherence, though not brave enough not to
83 * always end with an atomic update. */
84#define PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
85
86/** For reading RTCRITSECTRWSTATE::s::u64State. */
87#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
88# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicUoReadU64(a_pu64State)
89#else
90# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicReadU64(a_pu64State)
91#endif
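/* Editor's note (addition, not part of the upstream source): throughout this
 * file the 64-bit RTCRITSECTRWSTATE::s::u64State word is treated as a packed
 * bit field holding the reader count (RTCSRW_CNT_RD_MASK/SHIFT), the writer
 * count (RTCSRW_CNT_WR_MASK/SHIFT), the number of readers waiting for the
 * direction to flip back (RTCSRW_WAIT_CNT_RD_MASK/SHIFT), and the current
 * direction (RTCSRW_DIR_MASK with RTCSRW_DIR_READ/RTCSRW_DIR_WRITE).  The
 * exact bit positions are defined elsewhere (presumably iprt/critsect.h);
 * the code below only manipulates the word through these masks and shifts,
 * normally via compare-and-exchange. */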
92
93
[45152]94/* Undefine the automatic VBOX_STRICT API mappings. */
95#undef PDMCritSectRwEnterExcl
96#undef PDMCritSectRwTryEnterExcl
97#undef PDMCritSectRwEnterShared
98#undef PDMCritSectRwTryEnterShared
[25549]99
[25707]100
[90650]101/*********************************************************************************************************************************
102* Global Variables *
103*********************************************************************************************************************************/
104#if defined(RTASM_HAVE_CMP_WRITE_U128) && defined(RT_ARCH_AMD64)
105static int32_t g_fCmpWriteSupported = -1;
106#endif
107
108
[90677]109/*********************************************************************************************************************************
110* Internal Functions *
111*********************************************************************************************************************************/
112static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
113
114
[90650]115#ifdef RTASM_HAVE_CMP_WRITE_U128
116
117# ifdef RT_ARCH_AMD64
[45152]118/**
[90650]119 * Called once to initialize g_fCmpWriteSupported.
120 */
121DECL_NO_INLINE(static, bool) pdmCritSectRwIsCmpWriteU128SupportedSlow(void)
122{
123 bool const fCmpWriteSupported = RT_BOOL(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16);
124 ASMAtomicWriteS32(&g_fCmpWriteSupported, fCmpWriteSupported);
125 return fCmpWriteSupported;
126}
127# endif
128
129
130/**
131 * Indicates whether hardware actually supports 128-bit compare & write.
132 */
133DECL_FORCE_INLINE(bool) pdmCritSectRwIsCmpWriteU128Supported(void)
134{
135# ifdef RT_ARCH_AMD64
136 int32_t const fCmpWriteSupported = g_fCmpWriteSupported;
137 if (RT_LIKELY(fCmpWriteSupported >= 0))
138 return fCmpWriteSupported != 0;
139 return pdmCritSectRwIsCmpWriteU128SupportedSlow();
140# else
141 return true;
142# endif
143}
144
145#endif /* RTASM_HAVE_CMP_WRITE_U128 */
146
147/**
[45152]148 * Gets the ring-3 native thread handle of the calling thread.
149 *
150 * @returns native thread handle (ring-3).
[90385]151 * @param pVM The cross context VM structure.
[45152]152 * @param pThis The read/write critical section. This is only used in
153 * R0 and RC.
154 */
[90347]155DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PVMCC pVM, PCPDMCRITSECTRW pThis)
[25707]156{
[45152]157#ifdef IN_RING3
[90347]158 RT_NOREF(pVM, pThis);
[45152]159 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
[92204]160
161#elif defined(IN_RING0)
[45299]162 AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
[45152]163 NIL_RTNATIVETHREAD);
[92204]164 RTNATIVETHREAD hNativeSelf = GVMMR0GetRing3ThreadForSelf(pVM);
[90347]165 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
[92204]166
167#else
168# error "invalid context"
[45110]169#endif
[45152]170 return hNativeSelf;
171}
[25431]172
[45110]173
[90573]174DECL_NO_INLINE(static, int) pdmCritSectRwCorrupted(PPDMCRITSECTRW pThis, const char *pszMsg)
[90486]175{
176 ASMAtomicWriteU32(&pThis->s.Core.u32Magic, PDMCRITSECTRW_MAGIC_CORRUPT);
[90573]177 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pThis));
[90486]178 return VERR_PDM_CRITSECTRW_IPE;
179}
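/* Editor's note (addition, not part of the upstream source): marking the
 * section with PDMCRITSECTRW_MAGIC_CORRUPT means every subsequent enter/leave
 * path fails its u32Magic == RTCRITSECTRW_MAGIC check and returns
 * VERR_SEM_DESTROYED, so a detected corruption effectively retires the
 * critical section instead of risking further damage. */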
[25426]180
181
182
[45152]183#ifdef IN_RING3
184/**
185 * Changes the lock validator sub-class of the read/write critical section.
186 *
[45152]187 * It is recommended to try to make sure that nobody is using this critical section
188 * while changing the value.
189 *
190 * @returns The old sub-class. RTLOCKVAL_SUB_CLASS_INVALID is returned if the
191 * lock validator isn't compiled in or either of the parameters are
192 * invalid.
193 * @param pThis Pointer to the read/write critical section.
194 * @param uSubClass The new sub-class value.
195 */
196VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
[25707]197{
198 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
[45152]199 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
[48199]200# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
[45152]201 AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
[25707]202
[45152]203 RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
204 return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
205# else
[45110]206 NOREF(uSubClass);
[25707]207 return RTLOCKVAL_SUB_CLASS_INVALID;
[45152]208# endif
[25707]209}
[45152]210#endif /* IN_RING3 */
[25707]211
212
[45299]213/**
[90677]214 * Worker for pdmCritSectRwEnterShared returning with read-ownership of the CS.
[50001]215 */
[90677]216DECL_FORCE_INLINE(int) pdmCritSectRwEnterSharedGotIt(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
217 bool fNoVal, RTTHREAD hThreadSelf)
[50001]218{
[90677]219#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
220 if (!fNoVal)
221 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
222#else
223 RT_NOREF(pSrcPos, fNoVal, hThreadSelf);
224#endif
225
226 /* got it! */
227 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
228 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
229 return VINF_SUCCESS;
[50001]230}
231
232/**
[90677]233 * Worker for pdmCritSectRwEnterShared and pdmCritSectRwEnterSharedBailOut
234 * that decrements the wait count and maybe resets the semaphore.
[90672]235 */
[90677]236DECLINLINE(int) pdmCritSectRwEnterSharedGotItAfterWaiting(PVMCC pVM, PPDMCRITSECTRW pThis, uint64_t u64State,
237 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
[90672]238{
239 for (;;)
240 {
241 uint64_t const u64OldState = u64State;
242 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
[90677]243 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
244 AssertReturn((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT > 0,
245 pdmCritSectRwCorrupted(pThis, "Invalid read count"));
[90672]246 cWait--;
[90677]247 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
248 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
249
[90672]250 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
[90677]251 {
252 if (cWait == 0)
253 {
254 if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
255 {
256 int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
257 AssertRCReturn(rc, rc);
258 }
259 }
260 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
261 }
[90672]262
263 ASMNopPause();
264 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
[90677]265 ASMNopPause();
266
267 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
[90672]268 }
[90677]269 /* not reached */
[90672]270}
271
272
[90677]273#if defined(IN_RING0) || (defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT))
[90672]274/**
[90677]275 * Worker for pdmCritSectRwEnterSharedContended that decrements both read counts
276 * and returns @a rc.
277 *
278 * @note May return VINF_SUCCESS if we race the exclusive leave function and
279 * come out on the bottom.
280 *
281 * Ring-3 only calls this in a case where it is _not_ acceptable to take the
282 * lock, so even if we get the lock we'll have to leave. In the ring-0
283 * contexts, we can safely return VINF_SUCCESS in case of a race.
[90670]284 */
[90677]285DECL_NO_INLINE(static, int) pdmCritSectRwEnterSharedBailOut(PVMCC pVM, PPDMCRITSECTRW pThis, int rc,
286 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
[90670]287{
[90677]288#ifdef IN_RING0
289 uint64_t const tsStart = RTTimeNanoTS();
290 uint64_t cNsElapsed = 0;
[90670]291#endif
[90677]292 for (;;)
293 {
294 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
295 uint64_t u64OldState = u64State;
[90670]296
[90677]297 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
298 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
299 cWait--;
300
301 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
302 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));
303
304 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
305 {
306 c--;
307 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
308 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
309 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
310 return rc;
311 }
312 else
313 {
314 /*
315 * The direction changed, so we can actually get the lock now.
316 *
317 * This means that we _have_ to wait on the semaphore to be signalled
318 * so we can properly reset it. Otherwise the stuff gets out of whack,
319 * because signalling and resetting will race one another. An
320 * exception would be if we're not the last reader waiting and don't
321 * need to worry about the resetting.
322 *
323 * An option would be to do the resetting in PDMCritSectRwEnterExcl,
324 * but that would still leave a racing PDMCritSectRwEnterShared
325 * spinning hard for a little bit, which isn't great...
326 */
327 if (cWait == 0)
328 {
329# ifdef IN_RING0
330 /* Do timeout processing first to avoid redoing the above. */
331 uint32_t cMsWait;
332 if (cNsElapsed <= RT_NS_10SEC)
333 cMsWait = 32;
334 else
335 {
336 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
337 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
338 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
339 {
340 LogFunc(("%p: giving up\n", pThis));
341 return rc;
342 }
343 cMsWait = 2;
344 }
345
346 int rcWait = SUPSemEventMultiWait(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, cMsWait);
347 Log11Func(("%p: rc=%Rrc %'RU64 ns (hNativeWriter=%p u64State=%#RX64)\n", pThis, rcWait,
348 RTTimeNanoTS() - tsStart, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
349# else
350 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
351 int rcWait = SUPSemEventMultiWaitNoResume(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, RT_MS_5SEC);
352 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
353# endif
354 if (rcWait == VINF_SUCCESS)
355 {
356# ifdef IN_RING0
357 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
358# else
359 /* ring-3: Cannot return VINF_SUCCESS. */
360 Assert(RT_FAILURE_NP(rc));
361 int rc2 = pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
362 if (RT_SUCCESS(rc2))
363 rc2 = pdmCritSectRwLeaveSharedWorker(pVM, pThis, fNoVal);
364 return rc;
365# endif
366 }
367 AssertMsgReturn(rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED,
368 ("%p: rcWait=%Rrc rc=%Rrc", pThis, rcWait, rc),
369 RT_FAILURE_NP(rcWait) ? rcWait : -rcWait);
370 }
371 else
372 {
373 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
374 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
375 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
376 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
377 }
378
379# ifdef IN_RING0
380 /* Calculate the elapsed time here to avoid redoing state work. */
381 cNsElapsed = RTTimeNanoTS() - tsStart;
382# endif
383 }
384
385 ASMNopPause();
386 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
387 ASMNopPause();
388 }
[90670]389}
[90677]390#endif /* IN_RING0 || (IN_RING3 && PDMCRITSECTRW_STRICT) */
[90670]391
392
393/**
[90671]394 * Worker for pdmCritSectRwEnterShared that handles waiting for a contended CS.
395 * Caller has already added us to the read and read-wait counters.
396 */
397static int pdmCritSectRwEnterSharedContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis,
398 int rcBusy, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
399{
[90677]400 PSUPDRVSESSION const pSession = pVM->pSession;
401 SUPSEMEVENTMULTI const hEventMulti = (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead;
402# ifdef IN_RING0
403 uint64_t const tsStart = RTTimeNanoTS();
[90910]404 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
405 uint64_t cNsMaxTotal = cNsMaxTotalDef;
[90677]406 uint32_t cMsMaxOne = RT_MS_5SEC;
407 bool fNonInterruptible = false;
[90671]408# endif
409
410 for (uint32_t iLoop = 0; ; iLoop++)
411 {
[90673]412 /*
413 * Wait for the direction to switch.
414 */
[90671]415 int rc;
416# ifdef IN_RING3
417# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
418 rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
419 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
[90672]420 if (RT_FAILURE(rc))
[90677]421 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
[90671]422# else
423 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
424# endif
425# endif
[90677]426
[90672]427 for (;;)
[90671]428 {
[90677]429 /*
430 * We always wait with a timeout so we can re-check the structure sanity
431 * and not get stuck waiting on a corrupt or deleted section.
432 */
433# ifdef IN_RING3
434 rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, RT_MS_5SEC);
435# else
436 rc = !fNonInterruptible
437 ? SUPSemEventMultiWaitNoResume(pSession, hEventMulti, cMsMaxOne)
438 : SUPSemEventMultiWait(pSession, hEventMulti, cMsMaxOne);
439 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p u64State=%#RX64)\n", pThis, rc,
440 RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
441# endif
442 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
443 { /* likely */ }
444 else
445 {
446# ifdef IN_RING3
447 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
448# endif
449 return VERR_SEM_DESTROYED;
450 }
451 if (RT_LIKELY(rc == VINF_SUCCESS))
[90672]452 break;
[90677]453
454 /*
455 * Timeout and interrupted waits need careful handling in ring-0
456 * because we're cooperating with ring-3 on this critical section
457 * and thus need to make absolutely sure we won't get stuck here.
458 *
459 * The r0 interrupted case means something is pending (termination,
460 * signal, APC, debugger, whatever), so we must try our best to
461 * return to the caller and to ring-3 so it can be dealt with.
462 */
463 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
464 {
[90671]465# ifdef IN_RING0
[90677]466 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
467 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
468 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
469 ("rcTerm=%Rrc\n", rcTerm));
[90910]470 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
[90677]471 cNsMaxTotal = RT_NS_1MIN;
472
473 if (rc == VERR_TIMEOUT)
474 {
475 /* Try to get out of here with a non-VINF_SUCCESS status if
476 the thread is terminating or if the timeout has been exceeded. */
477 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrTimeout);
478 if ( rcTerm == VINF_THREAD_IS_TERMINATING
479 || cNsElapsed > cNsMaxTotal)
480 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
481 pSrcPos, fNoVal, hThreadSelf);
482 }
483 else
484 {
485 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
486 we will try non-interruptible sleep for a while to help resolve the issue
487 w/o guru'ing. */
488 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrInterrupted);
489 if ( rcTerm != VINF_THREAD_IS_TERMINATING
490 && rcBusy == VINF_SUCCESS
491 && pVCpu != NULL
492 && cNsElapsed <= cNsMaxTotal)
493 {
494 if (!fNonInterruptible)
495 {
496 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedNonInterruptibleWaits);
497 fNonInterruptible = true;
498 cMsMaxOne = 32;
499 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
500 if (cNsLeft > RT_NS_10SEC)
501 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
502 }
503 }
504 else
505 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
506 pSrcPos, fNoVal, hThreadSelf);
507 }
508# else /* IN_RING3 */
509 RT_NOREF(pVM, pVCpu, rcBusy);
510# endif /* IN_RING3 */
511 }
512 /*
513 * Any other return code is fatal.
514 */
515 else
516 {
517# ifdef IN_RING3
518 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
[90671]519# endif
[90677]520 AssertMsgFailed(("rc=%Rrc\n", rc));
521 return RT_FAILURE_NP(rc) ? rc : -rc;
522 }
[90672]523 }
[90677]524
[90671]525# ifdef IN_RING3
[90672]526 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
[90671]527# endif
[90672]528
529 /*
530 * Check the direction.
531 */
532 Assert(pThis->s.Core.fNeedReset);
533 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
534 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
[90671]535 {
[90672]536 /*
537 * Decrement the wait count and maybe reset the semaphore (if we're last).
538 */
[90677]539 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
[90671]540 }
541
[90677]542 AssertMsg(iLoop < 1,
543 ("%p: %u u64State=%#RX64 hNativeWriter=%p\n", pThis, iLoop, u64State, pThis->s.Core.u.s.hNativeWriter));
[90672]544 RTThreadYield();
[90671]545 }
546
547 /* not reached */
548}
549
550
551/**
[45299]552 * Worker that enters a read/write critical section with shared access.
553 *
554 * @returns VBox status code.
[90347]555 * @param pVM The cross context VM structure.
[45299]556 * @param pThis Pointer to the read/write critical section.
557 * @param rcBusy The busy return code for ring-0 and ring-3.
558 * @param fTryOnly Only try enter it, don't wait.
559 * @param pSrcPos The source position. (Can be NULL.)
560 * @param fNoVal No validation records.
561 */
[90347]562static int pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
563 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
[25431]564{
[25663]565 /*
566 * Validate input.
567 */
[45110]568 AssertPtr(pThis);
[45152]569 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
[57851]570
[45189]571#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
[25663]572 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
[45110]573 if (!fTryOnly)
[25663]574 {
[25710]575 int rc9;
576 RTNATIVETHREAD hNativeWriter;
[90637]577 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
[90347]578 if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis))
[45152]579 rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
[25710]580 else
[45152]581 rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
[25663]582 if (RT_FAILURE(rc9))
583 return rc9;
584 }
[90670]585#else
586 RTTHREAD hThreadSelf = NIL_RTTHREAD;
[25663]587#endif
[25549]588
[25663]589 /*
[90673]590 * Work the state.
[25663]591 */
[90654]592 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
[25663]593 uint64_t u64OldState = u64State;
[25431]594 for (;;)
595 {
[45110]596 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
[25431]597 {
[25549]598 /* It flows in the right direction, try to follow it before it changes. */
[45110]599 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
[25431]600 c++;
[90486]601 Assert(c < RTCSRW_CNT_MASK / 4);
602 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
[45110]603 u64State &= ~RTCSRW_CNT_RD_MASK;
604 u64State |= c << RTCSRW_CNT_RD_SHIFT;
[90637]605 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
[90670]606 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
[25431]607 }
[45110]608 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
[25431]609 {
[25431]610 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
[45110]611 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
612 u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
[90637]613 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
[25549]614 {
[45152]615 Assert(!pThis->s.Core.fNeedReset);
[90670]616 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
[25549]617 }
[25431]618 }
619 else
620 {
[25663]621 /* Is the writer perhaps doing a read recursion? */
622 RTNATIVETHREAD hNativeWriter;
[90637]623 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
[90486]624 if (hNativeWriter != NIL_RTNATIVETHREAD)
[25663]625 {
[90486]626 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
627 if (hNativeSelf == hNativeWriter)
628 {
[45189]629#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
[90486]630 if (!fNoVal)
631 {
632 int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
633 if (RT_FAILURE(rc9))
634 return rc9;
635 }
636#endif
637 uint32_t const cReads = ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
638 Assert(cReads < _16K);
639 AssertReturnStmt(cReads < PDM_CRITSECTRW_MAX_RECURSIONS, ASMAtomicDecU32(&pThis->s.Core.cWriterReads),
640 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
641 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
642 return VINF_SUCCESS; /* don't break! */
[45299]643 }
[25663]644 }
645
[45299]646 /*
647 * If we're only trying, return already.
648 */
[45110]649 if (fTryOnly)
[45299]650 {
651 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
[45110]652 return VERR_SEM_BUSY;
[45299]653 }
[25663]654
[48199]655#if defined(IN_RING3) || defined(IN_RING0)
[90677]656 /*
657 * Add ourselves to the queue and wait for the direction to change.
658 */
659 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
660 c++;
661 Assert(c < RTCSRW_CNT_MASK / 2);
662 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
[25549]663
[90677]664 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
665 cWait++;
666 Assert(cWait <= c);
667 Assert(cWait < RTCSRW_CNT_MASK / 2);
668 AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
[25549]669
[90677]670 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
671 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
[25549]672
[90677]673 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
[45293]674 {
[48199]675 /*
[90677]676 * In ring-3 it's straightforward, just optimize the RTThreadSelf() call.
[48199]677 */
[90677]678# if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
679 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
680# elif defined(IN_RING3)
681 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, RTThreadSelf());
682# else /* IN_RING0 */
683 /*
684 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
685 * account when waiting on contended locks.
686 */
687 PVMCPUCC pVCpu = VMMGetCpu(pVM);
688 if (pVCpu)
[48199]689 {
[90677]690 VMMR0EMTBLOCKCTX Ctx;
691 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
692 if (rc == VINF_SUCCESS)
693 {
694 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
695
696 rc = pdmCritSectRwEnterSharedContended(pVM, pVCpu, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
697
698 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
699 }
700 else
701 {
702 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
703 rc = pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
704 }
705 return rc;
[48199]706 }
[90677]707
708 /* Non-EMT. */
709 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
710 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
711# endif /* IN_RING0 */
[45293]712 }
[90677]713
714#else /* !IN_RING3 && !IN_RING0 */
715 /*
716 * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
717 * back to ring-3 and do it there or return rcBusy.
718 */
719# error "Unused code."
720 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
721 if (rcBusy == VINF_SUCCESS)
722 {
723 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
724 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
725 * back to ring-3. Goes for both kinds of crit sects. */
726 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
727 }
728 return rcBusy;
729#endif /* !IN_RING3 && !IN_RING0 */
[25431]730 }
731
[90670]732 ASMNopPause();
733 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
734 { /* likely */ }
735 else
[25431]736 return VERR_SEM_DESTROYED;
[90670]737 ASMNopPause();
[25549]738
[90654]739 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
[25549]740 u64OldState = u64State;
[25431]741 }
[90670]742 /* not reached */
[25431]743}
744
745
[45152]746/**
747 * Enter a critical section with shared (read) access.
748 *
749 * @returns VBox status code.
750 * @retval VINF_SUCCESS on success.
[58116]751 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
[45152]752 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
753 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
754 * during the operation.
755 *
[90347]756 * @param pVM The cross context VM structure.
[45152]757 * @param pThis Pointer to the read/write critical section.
758 * @param rcBusy The status code to return when we're in RC or R0 and the
759 * section is busy. Pass VINF_SUCCESS to acquire the
760 * critical section thru a ring-3 call if necessary.
761 * @sa PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
762 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
763 * RTCritSectRwEnterShared.
764 */
[90347]765VMMDECL(int) PDMCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
[25431]766{
[45293]767#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
[90347]768 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
[25663]769#else
770 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
[90347]771 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
[25663]772#endif
773}
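
/* Editor's illustrative sketch (addition, not part of the upstream source):
 * a minimal shared enter/leave pairing as a caller might write it.  The
 * helper name pdmExampleReadGuarded and the choice of VERR_SEM_BUSY as the
 * rcBusy value are illustrative assumptions only. */
#if 0
static int pdmExampleReadGuarded(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    /* Enter with shared (read) access; in ring-0/raw-mode a busy section
       yields the rcBusy status passed in (here VERR_SEM_BUSY). */
    int rc = PDMCritSectRwEnterShared(pVM, pCritSect, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        /* ... read whatever data the section protects ... */
        rc = PDMCritSectRwLeaveShared(pVM, pCritSect);
    }
    return rc;
}
#endif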
[25431]774
775
[45152]776/**
777 * Enter a critical section with shared (read) access.
778 *
779 * @returns VBox status code.
780 * @retval VINF_SUCCESS on success.
[58126]781 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
[45152]782 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
783 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
784 * during the operation.
785 *
[90347]786 * @param pVM The cross context VM structure.
[45152]787 * @param pThis Pointer to the read/write critical section.
788 * @param rcBusy The status code to return when we're in RC or R0 and the
789 * section is busy. Pass VINF_SUCCESS to acquire the
790 * critical section thru a ring-3 call if necessary.
791 * @param uId Where we're entering the section.
[58116]792 * @param SRC_POS The source position.
[45152]793 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
794 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
795 * RTCritSectRwEnterSharedDebug.
796 */
[90347]797VMMDECL(int) PDMCritSectRwEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
[25663]798{
[49486]799 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
[45293]800#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
[90347]801 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
[45293]802#else
[25663]803 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
[90347]804 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
[45293]805#endif
[25663]806}
[25431]807
808
[45152]809/**
810 * Try enter a critical section with shared (read) access.
811 *
812 * @returns VBox status code.
813 * @retval VINF_SUCCESS on success.
814 * @retval VERR_SEM_BUSY if the critsect was owned.
815 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
816 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
817 * during the operation.
818 *
[90347]819 * @param pVM The cross context VM structure.
[45152]820 * @param pThis Pointer to the read/write critical section.
821 * @sa PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
822 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
823 * RTCritSectRwTryEnterShared.
824 */
[90347]825VMMDECL(int) PDMCritSectRwTryEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis)
[25663]826{
[45293]827#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
[90347]828 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
[25663]829#else
830 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
[90347]831 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
[25663]832#endif
833}
834
835
[45152]836/**
837 * Try enter a critical section with shared (read) access.
838 *
839 * @returns VBox status code.
840 * @retval VINF_SUCCESS on success.
841 * @retval VERR_SEM_BUSY if the critsect was owned.
842 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
843 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
844 * during the operation.
845 *
[90347]846 * @param pVM The cross context VM structure.
[45152]847 * @param pThis Pointer to the read/write critical section.
848 * @param uId Where we're entering the section.
[58116]849 * @param SRC_POS The source position.
[45152]850 * @sa PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
851 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
852 * RTCritSectRwTryEnterSharedDebug.
853 */
[90347]854VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
[25663]855{
[49486]856 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
[45293]857#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
[90347]858 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
[45293]859#else
[25663]860 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
[90347]861 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
[45293]862#endif
[25663]863}
864
865
[45293]866#ifdef IN_RING3
[45152]867/**
[45293]868 * Enters a PDM read/write critical section with shared (read) access.
869 *
870 * @returns VINF_SUCCESS if entered successfully.
871 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
872 * during the operation.
873 *
[90347]874 * @param pVM The cross context VM structure.
[45293]875 * @param pThis Pointer to the read/write critical section.
876 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
877 */
[90347]878VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
[45293]879{
[90347]880 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3);
[45293]881}
[45299]882#endif
[45293]883
884
885/**
[45152]886 * Leave a critical section held with shared access.
887 *
888 * @returns VBox status code.
889 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
890 * during the operation.
[90347]891 * @param pVM The cross context VM structure.
[48936]892 * @param pThis Pointer to the read/write critical section.
[45299]893 * @param fNoVal No validation records (i.e. queued release).
[45152]894 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
895 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
896 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
897 */
[90347]898static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
[25663]899{
900 /*
901 * Validate handle.
902 */
[45110]903 AssertPtr(pThis);
[45152]904 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
[25663]905
[57851]906#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
907 NOREF(fNoVal);
908#endif
909
[25663]910 /*
911 * Check the direction and take action accordingly.
912 */
[90667]913#ifdef IN_RING0
914 PVMCPUCC pVCpu = NULL;
915#endif
[90654]916 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
[25663]917 uint64_t u64OldState = u64State;
[45110]918 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
[25663]919 {
[45189]920#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
[45299]921 if (fNoVal)
922 Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
923 else
924 {
925 int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
926 if (RT_FAILURE(rc9))
927 return rc9;
928 }
[25663]929#endif
930 for (;;)
[25431]931 {
[45110]932 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
[25663]933 AssertReturn(c > 0, VERR_NOT_OWNER);
934 c--;
935
936 if ( c > 0
[45310]937 || (u64State & RTCSRW_CNT_WR_MASK) == 0)
[25549]938 {
[25663]939 /* Don't change the direction. */
[45110]940 u64State &= ~RTCSRW_CNT_RD_MASK;
941 u64State |= c << RTCSRW_CNT_RD_SHIFT;
[90637]942 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
[25663]943 break;
[25549]944 }
[25663]945 else
946 {
[48199]947#if defined(IN_RING3) || defined(IN_RING0)
948# ifdef IN_RING0
[90667]949 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
950 if (!pVCpu)
951 pVCpu = VMMGetCpu(pVM);
952 if ( pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
953 || VMMRZCallRing3IsEnabled(pVCpu)
954 || RTSemEventIsSignalSafe()
955 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
956 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
957 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
958 )
[48199]959# endif
[25663]960 {
[48199]961 /* Reverse the direction and signal the writer threads. */
962 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
963 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
[90637]964 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
[48199]965 {
[90667]966 int rc;
967# ifdef IN_RING0
968 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
969 if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
970 {
971 VMMR0EMTBLOCKCTX Ctx;
972 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
973 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
974
975 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
976
977 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
978 }
979 else
980# endif
981 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
[48199]982 AssertRC(rc);
[90667]983 return rc;
[48199]984 }
985 }
986#endif /* IN_RING3 || IN_RING0 */
987#ifndef IN_RING3
988# ifdef IN_RING0
989 else
990# endif
991 {
992 /* Queue the exit request (ring-3). */
[90667]993# ifndef IN_RING0
994 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
995# endif
[48199]996 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
997 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
[90571]998 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves),
999 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
[90570]1000 pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = pThis->s.pSelfR3;
[90571]1001 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1002 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i])
[93554]1003 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] & HOST_PAGE_OFFSET_MASK)
1004 == ((uintptr_t)pThis & HOST_PAGE_OFFSET_MASK),
[90571]1005 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i], pThis),
[90573]1006 pdmCritSectRwCorrupted(pThis, "Invalid self pointer"));
[48199]1007 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1008 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1009 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1010 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
[25663]1011 break;
1012 }
[45293]1013#endif
[25663]1014 }
1015
1016 ASMNopPause();
[90667]1017 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1018 { }
1019 else
1020 return VERR_SEM_DESTROYED;
1021 ASMNopPause();
1022
[90654]1023 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
[25663]1024 u64OldState = u64State;
[25431]1025 }
[25663]1026 }
1027 else
1028 {
[90486]1029 /*
1030 * Write direction. Check that it's the owner calling and that it has reads to undo.
1031 */
[90347]1032 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
[90486]1033 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1034
[25663]1035 RTNATIVETHREAD hNativeWriter;
[90637]1036 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
[25663]1037 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
[45152]1038 AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
[45189]1039#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
[45299]1040 if (!fNoVal)
1041 {
1042 int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
1043 if (RT_FAILURE(rc))
1044 return rc;
1045 }
[25663]1046#endif
[90486]1047 uint32_t cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
[90573]1048 AssertReturn(cDepth < PDM_CRITSECTRW_MAX_RECURSIONS, pdmCritSectRwCorrupted(pThis, "too many writer-read recursions"));
[25663]1049 }
1050
1051 return VINF_SUCCESS;
1052}
1053
[90486]1054
[45299]1055/**
1056 * Leave a critical section held with shared access.
1057 *
1058 * @returns VBox status code.
1059 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1060 * during the operation.
[90347]1061 * @param pVM The cross context VM structure.
[45299]1062 * @param pThis Pointer to the read/write critical section.
1063 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
1064 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
1065 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
1066 */
[90347]1067VMMDECL(int) PDMCritSectRwLeaveShared(PVMCC pVM, PPDMCRITSECTRW pThis)
[45299]1068{
[90347]1069 return pdmCritSectRwLeaveSharedWorker(pVM, pThis, false /*fNoVal*/);
[45299]1070}
[25663]1071
[45299]1072
1073#if defined(IN_RING3) || defined(IN_RING0)
1074/**
1075 * PDMCritSectBothFF interface.
1076 *
[90347]1077 * @param pVM The cross context VM structure.
[45299]1078 * @param pThis Pointer to the read/write critical section.
1079 */
[90347]1080void pdmCritSectRwLeaveSharedQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
[25663]1081{
[90347]1082 pdmCritSectRwLeaveSharedWorker(pVM, pThis, true /*fNoVal*/);
[45299]1083}
1084#endif
1085
1086
1087/**
[90608]1088 * Worker for pdmCritSectRwEnterExcl that bails out on wait failure.
1089 *
1090 * @returns @a rc unless corrupted.
[90610]1091 * @param pThis Pointer to the read/write critical section.
[90608]1092 * @param rc The status to return.
1093 */
1094DECL_NO_INLINE(static, int) pdmCritSectRwEnterExclBailOut(PPDMCRITSECTRW pThis, int rc)
1095{
1096 /*
1097 * Decrement the counts and return the error.
1098 */
1099 for (;;)
1100 {
[90654]1101 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
[90608]1102 uint64_t const u64OldState = u64State;
1103 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1104 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on bailout"));
1105 c--;
1106 u64State &= ~RTCSRW_CNT_WR_MASK;
1107 u64State |= c << RTCSRW_CNT_WR_SHIFT;
[90637]1108 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
[90608]1109 return rc;
1110
1111 ASMNopPause();
1112 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1113 ASMNopPause();
1114 }
1115}
1116
[90611]1117
1118/**
1119 * Worker for pdmCritSectRwEnterExcl that handles the red tape after we've
1120 * gotten exclusive ownership of the critical section.
1121 */
[90665]1122DECL_FORCE_INLINE(int) pdmCritSectRwEnterExclFirst(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
1123 bool fNoVal, RTTHREAD hThreadSelf)
[90611]1124{
1125 RT_NOREF(hThreadSelf, fNoVal, pSrcPos);
[90654]1126 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
[90611]1127
[90654]1128#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1129 pThis->s.Core.cWriteRecursions = 1;
1130#else
[90611]1131 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
1132#endif
1133 Assert(pThis->s.Core.cWriterReads == 0);
1134
1135#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1136 if (!fNoVal)
1137 {
1138 if (hThreadSelf == NIL_RTTHREAD)
1139 hThreadSelf = RTThreadSelfAutoAdopt();
1140 RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
1141 }
1142#endif
1143 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1144 STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);
1145 return VINF_SUCCESS;
1146}
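
/* Editor's note (addition, not part of the upstream source): once a thread
 * holds the write lock it may recurse: pdmCritSectRwEnterExcl below bumps
 * cWriteRecursions for nested write enters, and pdmCritSectRwEnterShared
 * above bumps cWriterReads for read enters done while writing; both are
 * capped at PDM_CRITSECTRW_MAX_RECURSIONS, and pdmCritSectRwLeaveSharedWorker
 * above unwinds the writer-read count again. */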
1147
1148
[90610]1149#if defined(IN_RING3) || defined(IN_RING0)
1150/**
1151 * Worker for pdmCritSectRwEnterExcl that handles waiting when the section is
1152 * contended.
1153 */
1154static int pdmR3R0CritSectRwEnterExclContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis, RTNATIVETHREAD hNativeSelf,
[90611]1155 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, int rcBusy, RTTHREAD hThreadSelf)
[90610]1156{
[90611]1157 RT_NOREF(hThreadSelf, rcBusy, pSrcPos, fNoVal, pVCpu);
[90608]1158
[90634]1159 PSUPDRVSESSION const pSession = pVM->pSession;
1160 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pThis->s.Core.hEvtWrite;
1161# ifdef IN_RING0
1162 uint64_t const tsStart = RTTimeNanoTS();
[90910]1163 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
1164 uint64_t cNsMaxTotal = cNsMaxTotalDef;
[90634]1165 uint32_t cMsMaxOne = RT_MS_5SEC;
1166 bool fNonInterruptible = false;
1167# endif
1168
[90610]1169 for (uint32_t iLoop = 0; ; iLoop++)
1170 {
1171 /*
1172 * Wait for our turn.
1173 */
1174 int rc;
1175# ifdef IN_RING3
1176# ifdef PDMCRITSECTRW_STRICT
1177 rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
1178 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
1179 if (RT_SUCCESS(rc))
[90634]1180 { /* likely */ }
1181 else
1182 return pdmCritSectRwEnterExclBailOut(pThis, rc);
[90610]1183# else
1184 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
1185# endif
1186# endif
[90677]1187
[90634]1188 for (;;)
[90610]1189 {
[90634]1190 /*
1191 * We always wait with a timeout so we can re-check the structure sanity
1192 * and not get stuck waiting on a corrupt or deleted section.
1193 */
1194# ifdef IN_RING3
1195 rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
1196# else
1197 rc = !fNonInterruptible
1198 ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
1199 : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
1200 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p)\n",
[90637]1201 pThis, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter));
[90634]1202# endif
1203 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1204 { /* likely */ }
1205 else
[90610]1206 {
[90634]1207# ifdef IN_RING3
1208 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
[90610]1209# endif
[90634]1210 return VERR_SEM_DESTROYED;
[90610]1211 }
[90677]1212 if (RT_LIKELY(rc == VINF_SUCCESS))
[90634]1213 break;
[90610]1214
[90634]1215 /*
1216 * Timeout and interrupted waits need careful handling in ring-0
1217 * because we're cooperating with ring-3 on this critical section
1218 * and thus need to make absolutely sure we won't get stuck here.
1219 *
1220 * The r0 interrupted case means something is pending (termination,
1221 * signal, APC, debugger, whatever), so we must try our best to
1222 * return to the caller and to ring-3 so it can be dealt with.
1223 */
[90677]1224 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
[90634]1225 {
1226# ifdef IN_RING0
1227 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
1228 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
1229 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
1230 ("rcTerm=%Rrc\n", rcTerm));
[90910]1231 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
[90634]1232 cNsMaxTotal = RT_NS_1MIN;
1233
1234 if (rc == VERR_TIMEOUT)
1235 {
1236 /* Try to get out of here with a non-VINF_SUCCESS status if
1237 the thread is terminating or if the timeout has been exceeded. */
1238 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrTimeout);
1239 if ( rcTerm == VINF_THREAD_IS_TERMINATING
1240 || cNsElapsed > cNsMaxTotal)
1241 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1242 }
1243 else
1244 {
1245 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
1246 we will try non-interruptible sleep for a while to help resolve the issue
1247 w/o guru'ing. */
1248 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrInterrupted);
1249 if ( rcTerm != VINF_THREAD_IS_TERMINATING
1250 && rcBusy == VINF_SUCCESS
1251 && pVCpu != NULL
1252 && cNsElapsed <= cNsMaxTotal)
1253 {
1254 if (!fNonInterruptible)
1255 {
1256 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclNonInterruptibleWaits);
1257 fNonInterruptible = true;
1258 cMsMaxOne = 32;
1259 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
1260 if (cNsLeft > RT_NS_10SEC)
1261 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
1262 }
1263 }
1264 else
1265 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1266 }
1267# else /* IN_RING3 */
1268 RT_NOREF(pVM, pVCpu, rcBusy);
1269# endif /* IN_RING3 */
1270 }
1271 /*
1272 * Any other return code is fatal.
1273 */
1274 else
1275 {
[90610]1276# ifdef IN_RING3
[90634]1277 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
[90610]1278# endif
[90634]1279 AssertMsgFailed(("rc=%Rrc\n", rc));
1280 return RT_FAILURE_NP(rc) ? rc : -rc;
1281 }
[90610]1282 }
1283
[90677]1284# ifdef IN_RING3
1285 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1286# endif
1287
[90610]1288 /*
1289 * Try to take exclusive write ownership.
1290 */
[90654]1291 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
[90610]1292 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
1293 {
1294 bool fDone;
[90637]1295 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
[90610]1296 if (fDone)
[90611]1297 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
[90610]1298 }
1299 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
1300 }
1301}
1302#endif /* IN_RING3 || IN_RING0 */
1303
1304
[90608]1305/**
[45299]1306 * Worker that enters a read/write critical section with exclusive access.
1307 *
1308 * @returns VBox status code.
[90347]1309 * @param pVM The cross context VM structure.
[45299]1310 * @param pThis Pointer to the read/write critical section.
1311 * @param rcBusy The busy return code for ring-0 and ring-3.
1312 * @param fTryOnly Only try enter it, don't wait.
1313 * @param pSrcPos The source position. (Can be NULL.)
1314 * @param fNoVal No validation records.
1315 */
[90347]1316static int pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
1317 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
[45299]1318{
[25663]1319 /*
1320 * Validate input.
1321 */
[45110]1322 AssertPtr(pThis);
[45152]1323 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
[25663]1324
[90611]1325 RTTHREAD hThreadSelf = NIL_RTTHREAD;
[45189]1326#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
[45110]1327 if (!fTryOnly)
[25663]1328 {
1329 hThreadSelf = RTThreadSelfAutoAdopt();
[45152]1330 int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
[25663]1331 if (RT_FAILURE(rc9))
1332 return rc9;
1333 }
1334#endif
1335
1336 /*
1337 * Check if we're already the owner and just recursing.
1338 */
[90486]1339 RTNATIVETHREAD const hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1340 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
[25663]1341 RTNATIVETHREAD hNativeWriter;
[90637]1342 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
[25663]1343 if (hNativeSelf == hNativeWriter)
1344 {
[90654]1345 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
[45189]1346#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
[45299]1347 if (!fNoVal)
1348 {
1349 int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
1350 if (RT_FAILURE(rc9))
1351 return rc9;
1352 }
[25663]1353#endif
[45178]1354 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
[90654]1355#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1356 uint32_t const cDepth = ++pThis->s.Core.cWriteRecursions;
1357#else
[90486]1358 uint32_t const cDepth = ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
[90654]1359#endif
[90486]1360 AssertReturnStmt(cDepth > 1 && cDepth <= PDM_CRITSECTRW_MAX_RECURSIONS,
1361 ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions),
1362 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
[25663]1363 return VINF_SUCCESS;
1364 }
1365
1366 /*
[90665]1367 * First we try to grab an idle critical section using 128-bit atomics.
[25663]1368 */
[90665]1369 /** @todo This could be moved up before the recursion check. */
1370 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1371#ifdef RTASM_HAVE_CMP_WRITE_U128
1372 if ( (u64State & ~RTCSRW_DIR_MASK) == 0
1373 && pdmCritSectRwIsCmpWriteU128Supported())
1374 {
1375 RTCRITSECTRWSTATE OldState;
1376 OldState.s.u64State = u64State;
1377 OldState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1378 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1379
1380 RTCRITSECTRWSTATE NewState;
1381 NewState.s.u64State = (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1382 NewState.s.hNativeWriter = hNativeSelf;
1383
1384 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1385 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1386
1387 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1388 }
1389#endif
1390
1391 /*
1392 * Do it step by step. Update the state to reflect our desire.
1393 */
[25663]1394 uint64_t u64OldState = u64State;
1395
1396 for (;;)
1397 {
[45110]1398 if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1399 || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
[25663]1400 {
1401 /* It flows in the right direction, try to follow it before it changes. */
[45110]1402 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
[90486]1403 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
[25663]1404 c++;
[90486]1405 Assert(c < RTCSRW_CNT_WR_MASK / 4);
[45110]1406 u64State &= ~RTCSRW_CNT_WR_MASK;
1407 u64State |= c << RTCSRW_CNT_WR_SHIFT;
[90637]1408 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
[25663]1409 break;
1410 }
[45110]1411 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
[25663]1412 {
1413 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
[45110]1414 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1415 u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
[90637]1416 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
[25663]1417 break;
1418 }
[45110]1419 else if (fTryOnly)
[45299]1420 {
[25663]1421 /* Wrong direction and we're not supposed to wait, just return. */
[45299]1422 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
[45110]1423 return VERR_SEM_BUSY;
[45299]1424 }
[25431]1425 else
1426 {
[25663]1427 /* Add ourselves to the write count and break out to do the wait. */
[45110]1428 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
[90486]1429 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
[25663]1430 c++;
[90486]1431 Assert(c < RTCSRW_CNT_WR_MASK / 4);
[45110]1432 u64State &= ~RTCSRW_CNT_WR_MASK;
1433 u64State |= c << RTCSRW_CNT_WR_SHIFT;
[90637]1434 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
[25663]1435 break;
1436 }
1437
[90611]1438 ASMNopPause();
1439
1440 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1441 { /* likely */ }
1442 else
[25663]1443 return VERR_SEM_DESTROYED;
1444
1445 ASMNopPause();
[90654]1446 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
[25663]1447 u64OldState = u64State;
1448 }
1449
1450 /*
1451 * If we're in write mode now, try to grab the ownership. Play fair if there
[90659]1452 * are threads already waiting.
[25663]1453 */
[45110]1454 bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1455 && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
[90659]1456 || fTryOnly);
[25663]1457 if (fDone)
[90611]1458 {
[90637]1459 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
[90611]1460 if (fDone)
1461 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1462 }
[45299]1463
[90611]1464 /*
1465 * Okay, we have contention and will have to wait unless we're just trying.
1466 */
[90612]1467 if (fTryOnly)
1468 {
1469 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl)); /** @todo different statistics for this */
1470 return pdmCritSectRwEnterExclBailOut(pThis, VERR_SEM_BUSY);
1471 }
1472
[90611]1473 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
[90634]1474
1475 /*
1476 * Ring-3 is pretty straightforward.
1477 */
1478#if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
1479 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, hThreadSelf);
1480#elif defined(IN_RING3)
1481 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, RTThreadSelf());
1482
1483#elif defined(IN_RING0)
1484 /*
1485 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
1486 * account when waiting on contended locks.
1487 */
1488 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1489 if (pVCpu)
[90611]1490 {
[90634]1491 VMMR0EMTBLOCKCTX Ctx;
1492 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
1493 if (rc == VINF_SUCCESS)
1494 {
1495 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1496
1497 rc = pdmR3R0CritSectRwEnterExclContended(pVM, pVCpu, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1498
1499 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1500 }
1501 else
1502 {
1503 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
1504 rc = pdmCritSectRwEnterExclBailOut(pThis, rc);
1505 }
1506 return rc;
[90611]1507 }
1508
[90634]1509 /* Non-EMT. */
1510 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1511 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1512
1513#else
1514# error "Unused."
1515 /*
1516 * Raw-mode: Call host and take it there if rcBusy is VINF_SUCCESS.
1517 */
[90611]1518 rcBusy = pdmCritSectRwEnterExclBailOut(pThis, rcBusy);
1519 if (rcBusy == VINF_SUCCESS)
1520 {
1521 Assert(!fTryOnly);
1522 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1523 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
1524 * back to ring-3. Goes for both kinds of crit sects. */
1525 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
[25663]1526 }
[90611]1527 return rcBusy;
[90608]1528#endif
[25663]1529}
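
/*
 * For reference, a minimal sketch of how the 64-bit state word consulted by the
 * loops above decomposes. It only uses the RTCSRW_* masks/shifts and the
 * PDMCRITSECTRW_READ_STATE macro already employed in this file; the pThis
 * variable is assumed to be a valid PPDMCRITSECTRW.
 *
 * @code
 *      uint64_t const u64State  = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
 *      bool     const fWriteDir = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
 *      uint64_t const cWriters  = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
 *      uint64_t const cReaders  = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
 * @endcode
 */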
1530
1531
[45152]1532/**
1533 * Try to enter a critical section with exclusive (write) access.
1534 *
1535 * @returns VBox status code.
1536 * @retval VINF_SUCCESS on success.
[58116]1537 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
[45152]1538 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1539 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1540 * during the operation.
1541 *
[90347]1542 * @param pVM The cross context VM structure.
[45152]1543 * @param pThis Pointer to the read/write critical section.
1544 * @param rcBusy The status code to return when we're in RC or R0 and the
1545 * section is busy. Pass VINF_SUCCESS to acquire the
1546 * critical section through a ring-3 call if necessary.
1547 * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
1548 * PDMCritSectRwTryEnterExclDebug,
1549 * PDMCritSectEnterDebug, PDMCritSectEnter,
1550 * RTCritSectRwEnterExcl.
1551 */
[90347]1552VMMDECL(int) PDMCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
[25663]1553{
[45293]1554#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
[90347]1555 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
[25663]1556#else
1557 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
[90347]1558 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
[25663]1559#endif
1560}
1561
1562
[45152]1563/**
1564 * Try to enter a critical section with exclusive (write) access.
1565 *
1566 * @returns VBox status code.
1567 * @retval VINF_SUCCESS on success.
[58116]1568 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
[45152]1569 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1570 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1571 * during the operation.
1572 *
[90347]1573 * @param pVM The cross context VM structure.
[45152]1574 * @param pThis Pointer to the read/write critical section.
1575 * @param rcBusy The status code to return when we're in RC or R0 and the
1576 * section is busy. Pass VINF_SUCCESS to acquire the
1577 * critical section through a ring-3 call if necessary.
1578 * @param uId Where we're entering the section.
[58116]1579 * @param SRC_POS The source position.
[45152]1580 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
1581 * PDMCritSectRwTryEnterExclDebug,
1582 * PDMCritSectEnterDebug, PDMCritSectEnter,
1583 * RTCritSectRwEnterExclDebug.
1584 */
[90347]1585VMMDECL(int) PDMCritSectRwEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
[25663]1586{
[49486]1587 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
[45293]1588#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
[90347]1589 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
[45293]1590#else
[25663]1591 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
[90347]1592 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
[45293]1593#endif
[25663]1594}
1595
1596
[45152]1597/**
1598 * Try to enter a critical section with exclusive (write) access.
1599 *
1600 * @retval VINF_SUCCESS on success.
1601 * @retval VERR_SEM_BUSY if the critsect was owned.
1602 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1603 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1604 * during the operation.
1605 *
[90347]1606 * @param pVM The cross context VM structure.
[45152]1607 * @param pThis Pointer to the read/write critical section.
1608 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
1609 * PDMCritSectRwEnterExclDebug,
1610 * PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
1611 * RTCritSectRwTryEnterExcl.
1612 */
[90347]1613VMMDECL(int) PDMCritSectRwTryEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
[25663]1614{
[45293]1615#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
[90347]1616 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
[25663]1617#else
1618 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
[90347]1619 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
[25663]1620#endif
1621}
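
/*
 * Usage sketch (illustrative variable names): polling for exclusive access
 * without blocking, and backing off when the section is owned by somebody else.
 *
 * @code
 *      int rc = PDMCritSectRwTryEnterExcl(pVM, &pSomeDev->CritSectRw);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... work that needs the write lock ...
 *          PDMCritSectRwLeaveExcl(pVM, &pSomeDev->CritSectRw);
 *      }
 *      else if (rc == VERR_SEM_BUSY)
 *      {
 *          // Owned by another thread; retry later or take a slow path.
 *      }
 * @endcode
 */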
1622
1623
[45152]1624/**
1625 * Try to enter a critical section with exclusive (write) access.
1626 *
1627 * @retval VINF_SUCCESS on success.
1628 * @retval VERR_SEM_BUSY if the critsect was owned.
1629 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1630 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1631 * during the operation.
1632 *
[90347]1633 * @param pVM The cross context VM structure.
[45152]1634 * @param pThis Pointer to the read/write critical section.
1635 * @param uId Where we're entering the section.
[58116]1636 * @param SRC_POS The source position.
[45152]1637 * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
1638 * PDMCritSectRwEnterExclDebug,
1639 * PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
1640 * RTCritSectRwTryEnterExclDebug.
1641 */
[90347]1642VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
[25663]1643{
[49486]1644 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
[45293]1645#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
[90347]1646 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, NULL, false /*fNoVal*/);
[45293]1647#else
[25663]1648 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
[90347]1649 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
[45293]1650#endif
[25663]1651}
1652
1653
[45293]1654#ifdef IN_RING3
[45152]1655/**
[45293]1656 * Enters a PDM read/write critical section with exclusive (write) access.
1657 *
1658 * @returns VINF_SUCCESS if entered successfully.
1659 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1660 * during the operation.
1661 *
[90347]1662 * @param pVM The cross context VM structure.
[45293]1663 * @param pThis Pointer to the read/write critical section.
1664 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
1665 */
[90347]1666VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
[45293]1667{
[90347]1668 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
[45293]1669}
1670#endif /* IN_RING3 */
1671
1672
1673/**
[45152]1674 * Leave a critical section held exclusively.
1675 *
1676 * @returns VBox status code.
1677 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1678 * during the operation.
[90347]1679 * @param pVM The cross context VM structure.
[45152]1680 * @param pThis Pointer to the read/write critical section.
[45299]1681 * @param fNoVal No validation records (i.e. queued release).
[45152]1682 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1683 */
[90347]1684static int pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
[25663]1685{
1686 /*
1687 * Validate handle.
1688 */
[45110]1689 AssertPtr(pThis);
[45152]1690 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
[25663]1691
[57851]1692#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1693 NOREF(fNoVal);
1694#endif
1695
[90486]1696 /*
1697 * Check ownership.
1698 */
[90347]1699 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
[90486]1700 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1701
[25663]1702 RTNATIVETHREAD hNativeWriter;
[90637]1703 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
[25663]1704 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1705
[90665]1706
[25663]1707 /*
[90658]1708 * Unwind one recursion. Not the last?
[25663]1709 */
[90658]1710 if (pThis->s.Core.cWriteRecursions != 1)
[25663]1711 {
[45189]1712#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
[45299]1713 if (fNoVal)
1714 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1715 else
1716 {
[90658]1717 int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
[45299]1718 if (RT_FAILURE(rc9))
1719 return rc9;
1720 }
[25663]1721#endif
[90658]1722#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1723 uint32_t const cDepth = --pThis->s.Core.cWriteRecursions;
1724#else
1725 uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
1726#endif
1727 AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis, "Invalid write recursion value on leave"));
1728 return VINF_SUCCESS;
1729 }
[90650]1730
[90665]1731
[90658]1732 /*
1733 * Final recursion.
1734 */
1735 AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
1736#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1737 if (fNoVal)
1738 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1739 else
1740 {
1741 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
1742 if (RT_FAILURE(rc9))
1743 return rc9;
1744 }
1745#endif
1746
[90665]1747
[90650]1748#ifdef RTASM_HAVE_CMP_WRITE_U128
[90658]1749 /*
1750 * See if we can get out w/o any signalling as this is a common case.
1751 */
1752 if (pdmCritSectRwIsCmpWriteU128Supported())
1753 {
1754 RTCRITSECTRWSTATE OldState;
1755 OldState.s.u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1756 if (OldState.s.u64State == ((UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)))
[90650]1757 {
[90658]1758 OldState.s.hNativeWriter = hNativeSelf;
1759 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
[90650]1760
[90658]1761 RTCRITSECTRWSTATE NewState;
1762 NewState.s.u64State = RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
1763 NewState.s.hNativeWriter = NIL_RTNATIVETHREAD;
[90650]1764
[90654]1765# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
[90658]1766 pThis->s.Core.cWriteRecursions = 0;
[90654]1767# else
[90658]1768 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
[90654]1769# endif
[90658]1770 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
[90650]1771
[90658]1772 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1773 return VINF_SUCCESS;
[90650]1774
[90658]1775 /* bail out. */
1776 pThis->s.Core.cWriteRecursions = 1;
[90650]1777 }
[90658]1778 }
[90665]1779#endif /* RTASM_HAVE_CMP_WRITE_U128 */
[90650]1780
[90665]1781
[90660]1782#if defined(IN_RING3) || defined(IN_RING0)
[90658]1783 /*
1784 * Ring-3: Straightforward, just update the state and, if necessary, signal waiters.
[90659]1785 * Ring-0: Try to leave for real; this depends on the host and context.
[90658]1786 */
[90660]1787# ifdef IN_RING0
[90659]1788 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
1789 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1790 if ( pVCpu == NULL /* non-EMT access; if we implement it, it must be able to block */
1791 || VMMRZCallRing3IsEnabled(pVCpu)
1792 || RTSemEventIsSignalSafe()
1793 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
1794 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
1795 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
1796 )
[90660]1797# endif
[25663]1798 {
[90658]1799# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1800 pThis->s.Core.cWriteRecursions = 0;
1801# else
1802 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1803# endif
1804 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1805 ASMAtomicWriteHandle(&pThis->s.Core.u.s.hNativeWriter, NIL_RTNATIVETHREAD);
1806
1807 for (;;)
[45299]1808 {
[90658]1809 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1810 uint64_t u64OldState = u64State;
1811
1812 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1813 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
1814 c--;
1815
1816 if ( c > 0
1817 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
1818 {
[90659]1819 /*
1820 * Don't change the direction, wake up the next writer if any.
1821 */
[90658]1822 u64State &= ~RTCSRW_CNT_WR_MASK;
1823 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1824 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1825 {
[90672]1826 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
[90659]1827 int rc;
1828 if (c == 0)
1829 rc = VINF_SUCCESS;
[90660]1830# ifdef IN_RING0
1831 else if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
[90658]1832 {
[90659]1833 VMMR0EMTBLOCKCTX Ctx;
1834 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1835 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1836
1837 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1838
1839 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
[90658]1840 }
[90660]1841# endif
1842 else
1843 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
[90659]1844 AssertRC(rc);
1845 return rc;
[90658]1846 }
1847 }
1848 else
1849 {
[90659]1850 /*
1851 * Reverse the direction and signal the reader threads.
1852 */
[90658]1853 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1854 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1855 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1856 {
1857 Assert(!pThis->s.Core.fNeedReset);
1858 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
[90672]1859 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
[90659]1860
1861 int rc;
[90660]1862# ifdef IN_RING0
1863 if (!RTSemEventMultiIsSignalSafe() && pVCpu != NULL)
[90659]1864 {
1865 VMMR0EMTBLOCKCTX Ctx;
1866 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1867 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1868
1869 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1870
1871 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1872 }
[90660]1873 else
1874# endif
1875 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
[90658]1876 AssertRC(rc);
[90659]1877 return rc;
[90658]1878 }
1879 }
1880
1881 ASMNopPause();
[90659]1882 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1883 { /*likely*/ }
1884 else
[90658]1885 return VERR_SEM_DESTROYED;
[90659]1886 ASMNopPause();
[45299]1887 }
[90659]1888 /* not reached! */
[25663]1889 }
[90660]1890#endif /* IN_RING3 || IN_RING0 */
[25663]1891
[90665]1892
[90658]1893#ifndef IN_RING3
1894 /*
1895 * Queue the requested exit for ring-3 execution.
1896 */
[90659]1897# ifndef IN_RING0
[90658]1898 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
[90659]1899# endif
[90658]1900 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
1901 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
1902 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves),
1903 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
1904 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = pThis->s.pSelfR3;
1905 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1906 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i])
[93554]1907 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] & HOST_PAGE_OFFSET_MASK)
1908 == ((uintptr_t)pThis & HOST_PAGE_OFFSET_MASK),
[90658]1909 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i], pThis),
1910 pdmCritSectRwCorrupted(pThis, "Invalid self pointer on queue (excl)"));
1911 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1912 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1913 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1914 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
[25663]1915 return VINF_SUCCESS;
[90658]1916#endif
[25431]1917}
1918
1919
[45152]1920/**
[45299]1921 * Leave a critical section held exclusively.
1922 *
1923 * @returns VBox status code.
1924 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1925 * during the operation.
[90347]1926 * @param pVM The cross context VM structure.
[45299]1927 * @param pThis Pointer to the read/write critical section.
1928 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1929 */
[90347]1930VMMDECL(int) PDMCritSectRwLeaveExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
[45299]1931{
[90347]1932 return pdmCritSectRwLeaveExclWorker(pVM, pThis, false /*fNoVal*/);
[45299]1933}
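
/*
 * Recursion sketch (illustrative): the owning thread may re-enter the section;
 * every successful enter must be paired with a leave, and only the final leave
 * releases the write lock (cf. the cWriteRecursions handling in the worker above).
 *
 * @code
 *      PDMCritSectRwEnterExcl(pVM, pCritSectRw, VERR_SEM_BUSY);    // recursion depth 1
 *      PDMCritSectRwEnterExcl(pVM, pCritSectRw, VERR_SEM_BUSY);    // recursion depth 2
 *      PDMCritSectRwLeaveExcl(pVM, pCritSectRw);                   // back to depth 1
 *      PDMCritSectRwLeaveExcl(pVM, pCritSectRw);                   // fully released
 * @endcode
 */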
1934
1935
1936#if defined(IN_RING3) || defined(IN_RING0)
1937/**
1938 * PDMCritSectBothFF interface.
1939 *
[90347]1940 * @param pVM The cross context VM structure.
[45299]1941 * @param pThis Pointer to the read/write critical section.
1942 */
[90347]1943void pdmCritSectRwLeaveExclQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
[45299]1944{
[90347]1945 pdmCritSectRwLeaveExclWorker(pVM, pThis, true /*fNoVal*/);
[45299]1946}
1947#endif
1948
1949
1950/**
1951 * Checks whether the caller is the exclusive (write) owner of the critical section.
1952 *
[58116]1953 * @retval true if owner.
1954 * @retval false if not owner.
[90347]1955 * @param pVM The cross context VM structure.
[45152]1956 * @param pThis Pointer to the read/write critical section.
1957 * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
1958 * RTCritSectRwIsWriteOwner.
1959 */
[90347]1960VMMDECL(bool) PDMCritSectRwIsWriteOwner(PVMCC pVM, PPDMCRITSECTRW pThis)
[25426]1961{
[25431]1962 /*
[25663]1963 * Validate handle.
[25431]1964 */
[45110]1965 AssertPtr(pThis);
[45152]1966 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
[25431]1967
[25663]1968 /*
1969 * Check ownership.
1970 */
1971 RTNATIVETHREAD hNativeWriter;
[90637]1972 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
[45152]1973 if (hNativeWriter == NIL_RTNATIVETHREAD)
1974 return false;
[90347]1975 return hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
[25426]1976}
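
/*
 * Typical use is in assertions guarding helpers that expect their caller to hold
 * the write lock already; the critical section reference below is illustrative.
 *
 * @code
 *      Assert(PDMCritSectRwIsWriteOwner(pVM, &pSomeDev->CritSectRw));
 * @endcode
 */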
1977
1978
[45152]1979/**
1980 * Checks if the caller is one of the read owners of the critical section.
1981 *
1982 * @note !CAUTION! This API doesn't work reliably if lock validation isn't
1983 * enabled. Meaning, the answer is not trustworthy unless
1984 * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
1985 * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
1986 * creating the semaphore. And finally, if you used a locking class,
1987 * don't disable deadlock detection by setting cMsMinDeadlock to
1988 * RT_INDEFINITE_WAIT.
1989 *
1990 * In short, only use this for assertions.
1991 *
1992 * @returns @c true if reader, @c false if not.
[90347]1993 * @param pVM The cross context VM structure.
[45152]1994 * @param pThis Pointer to the read/write critical section.
1995 * @param fWannaHear What you'd like to hear when lock validation is not
1996 * available. (For avoiding asserting all over the place.)
1997 * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
1998 */
[90347]1999VMMDECL(bool) PDMCritSectRwIsReadOwner(PVMCC pVM, PPDMCRITSECTRW pThis, bool fWannaHear)
[25908]2000{
2001 /*
2002 * Validate handle.
2003 */
[45110]2004 AssertPtr(pThis);
[45152]2005 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
[25908]2006
2007 /*
2008 * Inspect the state.
2009 */
[90654]2010 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
[45110]2011 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
[25908]2012 {
2013 /*
2014 * It's in write mode, so we can only be a reader if we're also the
2015 * current writer.
2016 */
2017 RTNATIVETHREAD hWriter;
[90637]2018 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hWriter);
[45152]2019 if (hWriter == NIL_RTNATIVETHREAD)
2020 return false;
[90347]2021 return hWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
[25908]2022 }
2023
2024 /*
2025 * Read mode. If there are no current readers, then we cannot be a reader.
2026 */
[45110]2027 if (!(u64State & RTCSRW_CNT_RD_MASK))
[25908]2028 return false;
2029
[45152]2030#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
[25908]2031 /*
2032 * Ask the lock validator.
[45152]2033 * Note! It doesn't know everything, let's deal with that if it becomes an issue...
[25908]2034 */
[62659]2035 NOREF(fWannaHear);
[45152]2036 return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
[25908]2037#else
2038 /*
2039 * Ok, we don't know, just tell the caller what he wants to hear.
2040 */
2041 return fWannaHear;
2042#endif
2043}
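
/*
 * As the @note above explains, this is only dependable with lock validation
 * enabled, so it is normally confined to assertions; fWannaHear supplies the
 * answer to give when no validation records are available. Illustrative use:
 *
 * @code
 *      Assert(PDMCritSectRwIsReadOwner(pVM, &pSomeDev->CritSectRw, true));  // true == fWannaHear
 * @endcode
 */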
2044
2045
[45152]2046/**
2047 * Gets the write recursion count.
2048 *
2049 * @returns The write recursion count (0 if bad critsect).
2050 * @param pThis Pointer to the read/write critical section.
2051 * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
2052 * RTCritSectRwGetWriteRecursion.
2053 */
2054VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
[25426]2055{
[25431]2056 /*
[25663]2057 * Validate handle.
[25431]2058 */
[45110]2059 AssertPtr(pThis);
[45152]2060 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
[25431]2061
[25663]2062 /*
2063 * Return the requested data.
2064 */
[45152]2065 return pThis->s.Core.cWriteRecursions;
[25426]2066}
2067
2068
[45152]2069/**
2070 * Gets the read recursion count of the current writer.
2071 *
2072 * @returns The read recursion count (0 if bad critsect).
2073 * @param pThis Pointer to the read/write critical section.
2074 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
2075 * RTCritSectRwGetWriterReadRecursion.
2076 */
2077VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
[25426]2078{
[25431]2079 /*
[25663]2080 * Validate handle.
[25431]2081 */
[45110]2082 AssertPtr(pThis);
[45152]2083 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
[25431]2084
[25663]2085 /*
2086 * Return the requested data.
2087 */
[45152]2088 return pThis->s.Core.cWriterReads;
[25426]2089}
2090
2091
[45152]2092/**
2093 * Gets the current number of reads.
2094 *
2095 * This includes all read recursions, so it might be higher than the number of
2096 * read owners. It does not include reads done by the current writer.
2097 *
2098 * @returns The read count (0 if bad critsect).
2099 * @param pThis Pointer to the read/write critical section.
2100 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
2101 * RTCritSectRwGetReadCount.
2102 */
2103VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
[25426]2104{
[25431]2105 /*
2106 * Validate input.
2107 */
[45110]2108 AssertPtr(pThis);
[45152]2109 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
[25431]2110
[25663]2111 /*
2112 * Return the requested data.
2113 */
[90654]2114 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
[45110]2115 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
[25663]2116 return 0;
[45110]2117 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
[25426]2118}
2119
[45110]2120
[45152]2121/**
2122 * Checks if the read/write critical section is initialized or not.
2123 *
[58116]2124 * @retval true if initialized.
2125 * @retval false if not initialized.
[45152]2126 * @param pThis Pointer to the read/write critical section.
2127 * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
2128 */
2129VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
[45110]2130{
2131 AssertPtr(pThis);
[45152]2132 return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
[45110]2133}
2134