From 82d7cecaac94cb2e5abe1990a9ab728a471871ee Mon Sep 17 00:00:00 2001 From: shanhe72101 <137591095+shanhe72101@users.noreply.github.com> Date: Thu, 31 Jul 2025 17:22:58 +0800 Subject: [PATCH] Restore reader_count when the rwlock acquisition times out --- src/bthread/rwlock.cpp | 22 ++++++++++++++++++---- test/bthread_rwlock_unittest.cpp | 24 ++++++++++++++++++++---- 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/src/bthread/rwlock.cpp b/src/bthread/rwlock.cpp index e635668373..c2d4dc4370 100644 --- a/src/bthread/rwlock.cpp +++ b/src/bthread/rwlock.cpp @@ -41,7 +41,8 @@ const int RWLockMaxReaders = 1 << 30; // For reading. static int rwlock_rdlock_impl(bthread_rwlock_t* __restrict rwlock, const struct timespec* __restrict abstime) { - int reader_count = ((butil::atomic*)&rwlock->reader_count) + auto* reader_count_atomic = (butil::atomic*)&rwlock->reader_count; + int reader_count = reader_count_atomic ->fetch_add(1, butil::memory_order_acquire) + 1; // Fast path. if (reader_count >= 0) { @@ -53,18 +54,29 @@ static int rwlock_rdlock_impl(bthread_rwlock_t* __restrict rwlock, // Don't sample when contention profiler is off. if (NULL == bthread::g_cp) { - return bthread_sem_timedwait(&rwlock->reader_sema, abstime); + int rc = bthread_sem_timedwait(&rwlock->reader_sema, abstime); + if (rc != 0){ + reader_count_atomic->fetch_add(-1, butil::memory_order_relaxed); + } + return rc; } // Ask Collector if this (contended) locking should be sampled. const size_t sampling_range = bvar::is_collectable(&bthread::g_cp_sl); if (!bvar::is_sampling_range_valid(sampling_range)) { // Don't sample. - return bthread_sem_timedwait(&rwlock->reader_sema, abstime); + int rc = bthread_sem_timedwait(&rwlock->reader_sema, abstime); + if (rc != 0){ + reader_count_atomic->fetch_add(-1, butil::memory_order_relaxed); + } + return rc; } // Sample. 
const int64_t start_ns = butil::cpuwide_time_ns(); int rc = bthread_sem_timedwait(&rwlock->reader_sema, abstime); + if (rc != 0){ + reader_count_atomic->fetch_add(-1, butil::memory_order_relaxed); + } const int64_t end_ns = butil::cpuwide_time_ns(); const bthread_contention_site_t csite{end_ns - start_ns, sampling_range}; // Submit `csite' for each reader immediately after // owning rdlock to avoid the contention of `csite'. @@ -189,7 +201,8 @@ static inline int rwlock_wrlock_impl(bthread_rwlock_t* __restrict rwlock, } // Announce to readers there is a pending writer. - int reader_count = ((butil::atomic*)&rwlock->reader_count) + auto* reader_count_atomic = (butil::atomic*)&rwlock->reader_count; + int reader_count = reader_count_atomic ->fetch_add(-RWLockMaxReaders, butil::memory_order_release); // Wait for active readers. if (reader_count != 0 && @@ -204,6 +217,7 @@ static inline int rwlock_wrlock_impl(bthread_rwlock_t* __restrict rwlock, rc = bthread_sem_timedwait(&rwlock->writer_sema, abstime); if (0 != rc) { SUBMIT_CSITE_IF_NEED; + reader_count_atomic->fetch_add(RWLockMaxReaders, butil::memory_order_release); bthread_mutex_unlock(&rwlock->write_queue_mutex); return rc; } diff --git a/test/bthread_rwlock_unittest.cpp b/test/bthread_rwlock_unittest.cpp index 2da226cb2f..ab385a7534 100644 --- a/test/bthread_rwlock_unittest.cpp +++ b/test/bthread_rwlock_unittest.cpp @@ -85,26 +85,27 @@ TEST(RWLockTest, used_in_pthread) { } void* do_timedrdlock(void *arg) { - struct timespec t = { -2, 0 }; + 
struct timespec t = { 0, 100 }; EXPECT_EQ(ETIMEDOUT, bthread_rwlock_timedrdlock((bthread_rwlock_t*)arg, &t)); return NULL; } void* do_timedwrlock(void *arg) { - struct timespec t = { -2, 0 }; + struct timespec t = { 0, 100 }; EXPECT_EQ(ETIMEDOUT, bthread_rwlock_timedwrlock((bthread_rwlock_t*)arg, &t)); - LOG(INFO) << 10; return NULL; } TEST(RWLockTest, timedlock) { bthread_rwlock_t rw; ASSERT_EQ(0, bthread_rwlock_init(&rw, NULL)); + bthread_t th; ASSERT_EQ(0, bthread_rwlock_rdlock(&rw)); - bthread_t th; ASSERT_EQ(0, bthread_start_urgent(&th, NULL, do_timedwrlock, &rw)); ASSERT_EQ(0, bthread_join(th, NULL)); + ASSERT_EQ(0, bthread_start_urgent(&th, NULL, rdlocker, &rw)); + ASSERT_EQ(0, bthread_join(th, NULL)); ASSERT_EQ(0, bthread_rwlock_unlock(&rw)); ASSERT_EQ(0, bthread_rwlock_wrlock(&rw)); @@ -113,6 +114,21 @@ TEST(RWLockTest, timedlock) { ASSERT_EQ(0, bthread_start_urgent(&th, NULL, do_timedrdlock, &rw)); ASSERT_EQ(0, bthread_join(th, NULL)); ASSERT_EQ(0, bthread_rwlock_unlock(&rw)); + + ASSERT_EQ(0, bthread_rwlock_wrlock(&rw)); + ASSERT_EQ(0, bthread_start_urgent(&th, NULL, do_timedrdlock, &rw)); + ASSERT_EQ(0, bthread_join(th, NULL)); + ASSERT_EQ(0, bthread_start_urgent(&th, NULL, do_timedwrlock, &rw)); + ASSERT_EQ(0, bthread_join(th, NULL)); + ASSERT_EQ(0, bthread_rwlock_unlock(&rw)); + + ASSERT_EQ(0, bthread_rwlock_rdlock(&rw)); + ASSERT_EQ(0, bthread_start_urgent(&th, NULL, do_timedwrlock, &rw)); + ASSERT_EQ(0, bthread_join(th, NULL)); + ASSERT_EQ(0, bthread_start_urgent(&th, NULL, rdlocker, &rw)); + ASSERT_EQ(0, bthread_join(th, NULL)); + ASSERT_EQ(0, bthread_rwlock_unlock(&rw)); + ASSERT_EQ(0, bthread_rwlock_destroy(&rw)); }