[PATCH v2 08/22] ntdll: Downgrade loader lock to shared when calling DLL entry points.

Paul Gofman pgofman at codeweavers.com
Tue Oct 5 17:49:14 CDT 2021


Signed-off-by: Paul Gofman <pgofman at codeweavers.com>
---
 dlls/ntdll/loader.c | 192 +++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 180 insertions(+), 12 deletions(-)
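
Note for review (not part of the patch): SRW locks have no downgrade primitive, so "downgrading" has to be ReleaseSRWLockExclusive() followed by AcquireSRWLockShared(), which leaves a window where another exclusive waiter can be granted the lock in between. A minimal standalone sketch of that gap, using the public Win32 SRW API rather than the ntdll Rtl* calls used in the patch:

    #include <windows.h>
    #include <stdio.h>

    static SRWLOCK lock = SRWLOCK_INIT;

    static void downgrade_naive(void)
    {
        ReleaseSRWLockExclusive( &lock );
        /* Another thread calling AcquireSRWLockExclusive() can be granted the
         * lock right here, before we manage to reacquire it in shared mode. */
        AcquireSRWLockShared( &lock );
    }

    int main(void)
    {
        AcquireSRWLockExclusive( &lock );
        downgrade_naive();
        printf( "now holding the lock shared\n" );
        ReleaseSRWLockShared( &lock );
        return 0;
    }

The exclusive_lock_thread_id / condition variable machinery below is there to close that window: the downgrading thread stays published as the exclusive owner, so later exclusive lockers wait on the condition variable instead of racing for the SRW lock.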

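Similarly, the way a would-be exclusive locker coordinates with a (possibly downgraded) owner can be sketched with the public Win32 equivalents of the ntdll primitives used in the patch (SleepConditionVariableSRW instead of RtlSleepConditionVariableSRW, and so on). This is a simplified illustration only, with made-up names (owner_tid, lock_exclusive_example, ...), and it leaves out the recursion counting, timeouts and DLL-shutdown checks:

    #include <windows.h>

    static SRWLOCK lock = SRWLOCK_INIT;
    static CONDITION_VARIABLE owner_released = CONDITION_VARIABLE_INIT;
    static volatile LONG owner_tid;  /* thread owning the (possibly downgraded) exclusive lock */

    static void lock_exclusive_example(void)
    {
        /* Take the lock shared first so a downgraded owner is not blocked,
         * claim ownership, and only then upgrade the SRW lock to exclusive. */
        AcquireSRWLockShared( &lock );
        while (InterlockedCompareExchange( &owner_tid, (LONG)GetCurrentThreadId(), 0 ))
        {
            /* Someone else owns the exclusive lock (perhaps downgraded): wait on
             * the condition variable, dropping the shared lock while asleep. */
            SleepConditionVariableSRW( &owner_released, &lock, INFINITE,
                                       CONDITION_VARIABLE_LOCKMODE_SHARED );
        }
        ReleaseSRWLockShared( &lock );
        AcquireSRWLockExclusive( &lock );
    }

    static void unlock_exclusive_example(void)
    {
        InterlockedExchange( &owner_tid, 0 );
        ReleaseSRWLockExclusive( &lock );
        WakeAllConditionVariable( &owner_released );
    }

    int main(void)
    {
        lock_exclusive_example();
        /* ... exclusive work, possibly downgrading around DllMain-style callbacks ... */
        unlock_exclusive_example();
        return 0;
    }
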
diff --git a/dlls/ntdll/loader.c b/dlls/ntdll/loader.c
index 547c61aa7cb..c4fdfdf414b 100644
--- a/dlls/ntdll/loader.c
+++ b/dlls/ntdll/loader.c
@@ -160,8 +160,18 @@ static RTL_CRITICAL_SECTION_DEBUG critsect_debug =
 };
 static RTL_CRITICAL_SECTION loader_section = { &critsect_debug, -1, 0, 0, 0, 0 };
 
+
+/* Thread which holds the exclusive lock, or a shared lock downgraded from exclusive which can be upgraded back. */
+static volatile DWORD exclusive_lock_thread_id;
+
 static RTL_SRWLOCK loader_srw_lock = RTL_SRWLOCK_INIT;
 static BOOL locked_exclusive;
+static RTL_CONDITION_VARIABLE exclusive_lock_released_cv = RTL_CONDITION_VARIABLE_INIT;
+
+#define MAX_DOWNGRADE_RECURSION_COUNT 256
+
+static unsigned int lock_exclusive_recursion_count[MAX_DOWNGRADE_RECURSION_COUNT];
+static unsigned int lock_exclusive_recursion_stack;
 
 static CRITICAL_SECTION dlldir_section;
 static CRITICAL_SECTION_DEBUG dlldir_critsect_debug =
@@ -240,27 +250,99 @@ static ULONG dec_recursion_count(void)
     return --NtCurrentTeb()->Spare2;
 }
 
+/*************************************************************************
+ *		wait_for_exclusive_lock_release
+ *
+ * Should be called with shared loader lock.
+ * Waits until the exclusive lock is fully released through unlock_loader(),
+ * not just downgraded.
+ * The shared loader lock is released during the wait and then reacquired, so
+ * any data previously read under the shared lock is invalidated.
+ */
+static void wait_for_exclusive_lock_release(void)
+{
+    LARGE_INTEGER timeout;
+    NTSTATUS status;
+
+    TRACE( ".\n" );
+
+    if (RtlDllShutdownInProgress()) return;
+
+    assert( !locked_exclusive );
+    /* Some thread must be in the loading sequence otherwise we may wait forever. */
+    assert( exclusive_lock_thread_id && exclusive_lock_thread_id != GetCurrentThreadId() );
+
+    timeout.QuadPart = -10000 * 5000; /* 5 sec. */
+
+    while (!RtlDllShutdownInProgress()
+           && ((status = RtlSleepConditionVariableSRW( &exclusive_lock_released_cv, &loader_srw_lock,
+                                                      &timeout, CONDITION_VARIABLE_LOCKMODE_SHARED ))
+           || exclusive_lock_thread_id))
+    {
+        if (status)
+        {
+            ERR( "Error waiting for load event complete, status %#x.\n", status );
+            assert( status == STATUS_TIMEOUT );
+        }
+    }
+}
+
 /*************************************************************************
  *		lock_loader_exclusive
  *
  * Take exclusive ownership of internal loader lock.
  * Recursive locking is allowed.
+ * If the current lock is shared, taking exclusive mode is only possible if the
+ * lock was previously downgraded from exclusive, i.e., if the initial lock taken
+ * by the thread was exclusive.
  */
 static void lock_loader_exclusive(void)
 {
     ULONG recursion_count = inc_recursion_count();
 
-    TRACE( "recursion_count %u.\n", recursion_count );
+    TRACE( "recursion_count %u, locked_exclusive %d, exclusive_lock_thread_id %04x.\n",
+           recursion_count, locked_exclusive, exclusive_lock_thread_id );
+
+    if (RtlDllShutdownInProgress()) return;
+
+    /* locked_exclusive implies recursion_count but still check recursion_count first
+     * to avoid potentially racy 'locked_exclusive' access without holding loader_srw_lock. */
+    if (recursion_count && locked_exclusive)
+    {
+        ++lock_exclusive_recursion_count[lock_exclusive_recursion_stack];
+        return;
+    }
+
     if (!recursion_count)
     {
-        if (!RtlDllShutdownInProgress())
-            RtlAcquireSRWLockExclusive( &loader_srw_lock );
-        locked_exclusive = TRUE;
+        /* We can't call RtlAcquireSRWLockExclusive() right away. If there is a thread
+         * which claimed exclusive access through exclusive_lock_thread_id and downgraded
+         * the lock to shared, waiting for the exclusive lock would block any new shared
+         * lock waiters, which should be allowed to proceed in that state.
+         * So acquire a shared lock, claim the exclusive access and then upgrade the SRW
+         * lock to exclusive. */
+        RtlAcquireSRWLockShared( &loader_srw_lock );
+
+        while (InterlockedCompareExchange( (LONG volatile *)&exclusive_lock_thread_id, GetCurrentThreadId(), 0))
+        {
+            wait_for_exclusive_lock_release();
+            if (RtlDllShutdownInProgress()) return;
+        }
+        assert( !lock_exclusive_recursion_stack );
+        assert( !lock_exclusive_recursion_count[lock_exclusive_recursion_stack] );
     }
     else
     {
-        assert( locked_exclusive );
+        /* It is not allowed to upgrade the lock to exclusive, only
+         * reacquire previously downgraded exclusive lock. */
+        assert( GetCurrentThreadId() == exclusive_lock_thread_id );
     }
+    RtlReleaseSRWLockShared( &loader_srw_lock );
+    if (RtlDllShutdownInProgress()) return;
+    RtlAcquireSRWLockExclusive( &loader_srw_lock );
+    locked_exclusive = TRUE;
+
+    ++lock_exclusive_recursion_count[lock_exclusive_recursion_stack];
 }
 
 /*************************************************************************
@@ -273,35 +355,119 @@ static void lock_loader_shared(void)
 {
     ULONG recursion_count = inc_recursion_count();
 
-    TRACE("recursion_count %u, locked_exclusive %d.\n", recursion_count, locked_exclusive);
+    TRACE( "recursion_count %u, locked_exclusive %d, exclusive_lock_thread_id %04x.\n",
+           recursion_count, locked_exclusive, exclusive_lock_thread_id );
 
-    if (!recursion_count && !RtlDllShutdownInProgress())
+    if (RtlDllShutdownInProgress()) return;
+
+    if (!recursion_count)
         RtlAcquireSRWLockShared( &loader_srw_lock );
+
+    /* If we already hold the lock exclusive, it stays exclusive.
+     * The exclusive recursion count is needed to downgrade the exclusive
+     * lock back to shared once we are out of the exclusive lock scope.
+     * The scopes are bounded by explicit exclusive lock downgrades,
+     * which increment lock_exclusive_recursion_stack.
+     */
+    if (locked_exclusive)
+        ++lock_exclusive_recursion_count[lock_exclusive_recursion_stack];
 }
 
 /*************************************************************************
  *		unlock_loader
  *
  * Release internal loader lock.
+ * The exclusive lock being released might have been recursively taken while the
+ * thread was holding a shared lock (downgraded from exclusive). In this case unlock_loader()
+ * will downgrade the exclusive lock back to shared.
  */
 static void unlock_loader(void)
 {
     ULONG recursion_count = dec_recursion_count();
 
-    TRACE( "recursion_count %u.\n", recursion_count );
+    TRACE("recursion_count %u, locked_exclusive %d, lock_exclusive_recursion_stack %u.\n",
+          recursion_count, locked_exclusive, lock_exclusive_recursion_stack );
 
     if (RtlDllShutdownInProgress()) return;
 
     assert( recursion_count != ~0u );
 
-    if (recursion_count) return;
+    if (!locked_exclusive)
+    {
+        if (!recursion_count) RtlReleaseSRWLockShared( &loader_srw_lock );
+        return;
+    }
+
+    assert( exclusive_lock_thread_id == GetCurrentThreadId() );
+    assert( lock_exclusive_recursion_count[lock_exclusive_recursion_stack] );
+    assert( recursion_count || lock_exclusive_recursion_count[lock_exclusive_recursion_stack] == 1 );
 
-    if (locked_exclusive)
+    if (--lock_exclusive_recursion_count[lock_exclusive_recursion_stack]) return;
+
+    locked_exclusive = FALSE;
+
+    if (!recursion_count)
     {
-        locked_exclusive = FALSE;
+        InterlockedExchange( (LONG volatile *)&exclusive_lock_thread_id, 0 );
         RtlReleaseSRWLockExclusive( &loader_srw_lock );
+        RtlWakeAllConditionVariable( &exclusive_lock_released_cv );
     }
-    else RtlReleaseSRWLockShared( &loader_srw_lock );
+    else
+    {
+        assert( lock_exclusive_recursion_stack );
+        RtlReleaseSRWLockExclusive( &loader_srw_lock );
+        if (RtlDllShutdownInProgress()) return;
+        RtlAcquireSRWLockShared( &loader_srw_lock );
+    }
+}
+
+/*************************************************************************
+ *		lock_loader_downgrade_exclusive
+ *
+ * Downgrade exclusive internal loader lock ownership to shared.
+ * The downgraded lock allows other threads to take the lock shared, but threads
+ * waiting for the exclusive lock will not acquire it until the
+ * initial (exclusive) lock taken by the current thread is released.
+ */
+static void lock_loader_downgrade_exclusive(void)
+{
+    TRACE( "locked_exclusive %d, exclusive thread %04x.\n", locked_exclusive, exclusive_lock_thread_id );
+
+    if (RtlDllShutdownInProgress()) return;
+
+    assert( exclusive_lock_thread_id == GetCurrentThreadId() );
+    assert( locked_exclusive );
+    assert( lock_exclusive_recursion_stack < MAX_DOWNGRADE_RECURSION_COUNT );
+    ++lock_exclusive_recursion_stack;
+    assert( !lock_exclusive_recursion_count[lock_exclusive_recursion_stack] );
+
+    locked_exclusive = FALSE;
+    RtlReleaseSRWLockExclusive( &loader_srw_lock );
+    if (RtlDllShutdownInProgress()) return;
+    RtlAcquireSRWLockShared( &loader_srw_lock );
+}
+
+/*************************************************************************
+ *		lock_loader_restore_exclusive
+ *
+ * Restore exclusive internal loader lock ownership which was previously downgraded to shared.
+ */
+static void lock_loader_restore_exclusive(void)
+{
+    TRACE( "locked_exclusive %d, exclusive thread %04x.\n", locked_exclusive, exclusive_lock_thread_id );
+
+    if (RtlDllShutdownInProgress()) return;
+
+    assert( !locked_exclusive );
+    assert( GetCurrentThreadId() == exclusive_lock_thread_id );
+
+    assert( lock_exclusive_recursion_stack );
+    --lock_exclusive_recursion_stack;
+
+    RtlReleaseSRWLockShared( &loader_srw_lock );
+    if (RtlDllShutdownInProgress()) return;
+    RtlAcquireSRWLockExclusive( &loader_srw_lock );
+    locked_exclusive = TRUE;
 }
 
 #define RTL_UNLOAD_EVENT_TRACE_NUMBER 64
@@ -1478,6 +1644,7 @@ static NTSTATUS MODULE_InitDLL( WINE_MODREF *wm, UINT reason, LPVOID lpReserved
     else TRACE("(%p %s,%s,%p) - CALL\n", module, debugstr_w(wm->ldr.BaseDllName.Buffer),
                reason_names[reason], lpReserved );
 
+    lock_loader_downgrade_exclusive();
     RtlEnterCriticalSection( &loader_section );
 
     __TRY
@@ -1495,6 +1662,7 @@ static NTSTATUS MODULE_InitDLL( WINE_MODREF *wm, UINT reason, LPVOID lpReserved
     __ENDTRY
 
     RtlLeaveCriticalSection( &loader_section );
+    lock_loader_restore_exclusive();
 
     /* The state of the module list may have changed due to the call
        to the dll. We cannot assume that this module has not been
-- 
2.31.1