// kernel_api/sync/rwlock.rs

use core::fmt::Formatter;
use core::mem;
use core::sync::atomic::{AtomicUsize, Ordering};

/// A reader-writer spinlock: any number of shared readers, or one exclusive writer.
#[stable(feature = "kernel_core_api", since = "1.0.0")]
pub type RwSpinlock<T: ?Sized> = lock_api::RwLock<RwCount, T>;

/// RAII guard granting shared read access to an [`RwSpinlock`].
#[stable(feature = "kernel_core_api", since = "1.0.0")]
pub type RwReadGuard<'a, T: ?Sized> = lock_api::RwLockReadGuard<'a, RwCount, T>;

/// RAII guard granting shared read access that can later be upgraded to write access.
#[stable(feature = "kernel_core_api", since = "1.0.0")]
pub type RwUpgradableReadGuard<'a, T: ?Sized> = lock_api::RwLockUpgradableReadGuard<'a, RwCount, T>;

/// RAII guard granting exclusive write access to an [`RwSpinlock`].
#[stable(feature = "kernel_core_api", since = "1.0.0")]
pub type RwWriteGuard<'a, T: ?Sized> = lock_api::RwLockWriteGuard<'a, RwCount, T>;

/// Raw lock state backing [`RwSpinlock`]: the most significant bit flags an
/// exclusive writer, the next bit flags an upgradable reader, and the
/// remaining bits count plain readers.
#[doc(hidden)]
#[stable(feature = "kernel_core_api", since = "1.0.0")]
pub struct RwCount(AtomicUsize);

impl RwCount {
    const WRITE_BIT_MASK: usize = 1 << (mem::size_of::<usize>() * 8 - 1);
    const UPGRADEABLE_BIT_MASK: usize = 1 << (mem::size_of::<usize>() * 8 - 2);
    const READ_COUNT_MASK: usize = !(Self::WRITE_BIT_MASK | Self::UPGRADEABLE_BIT_MASK);
}
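// Illustrative addition, not present in the original source: compile-time
// sanity checks that the three masks above partition the counter word with
// no overlapping bits.
const _: () = {
    assert!((RwCount::WRITE_BIT_MASK & RwCount::UPGRADEABLE_BIT_MASK) == 0);
    assert!(((RwCount::WRITE_BIT_MASK | RwCount::UPGRADEABLE_BIT_MASK) & RwCount::READ_COUNT_MASK) == 0);
    assert!((RwCount::WRITE_BIT_MASK | RwCount::UPGRADEABLE_BIT_MASK | RwCount::READ_COUNT_MASK) == usize::MAX);
};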

#[stable(feature = "kernel_core_api", since = "1.0.0")]
impl core::fmt::Debug for RwCount {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        let mut d = f.debug_struct("RwCount");
        let val = self.0.load(Ordering::Relaxed);
        let write = val & Self::WRITE_BIT_MASK != 0;
        let read = val & Self::READ_COUNT_MASK;
        let upgradeable_reader = val & Self::UPGRADEABLE_BIT_MASK != 0;
        d.field("write", &write);
        // An upgradable reader is reported as one more ordinary reader.
        d.field("read", &(read + if upgradeable_reader { 1 } else { 0 }));
        d.finish()
    }
}

#[stable(feature = "kernel_core_api", since = "1.0.0")]
unsafe impl lock_api::RawRwLock for RwCount {
    const INIT: Self = Self(AtomicUsize::new(0));
    type GuardMarker = lock_api::GuardSend;

    fn lock_shared(&self) {
        while !self.try_lock_shared() {
            core::hint::spin_loop();
        }
    }

    fn try_lock_shared(&self) -> bool {
        let mut old_value = self.0.load(Ordering::Relaxed);

        loop {
            let old_normal_count = old_value & Self::READ_COUNT_MASK;

            if old_normal_count == Self::READ_COUNT_MASK { panic!("Reader count overflowed") }
            if (old_value & Self::WRITE_BIT_MASK) != 0 { return false; }

            let new_value = (old_normal_count + 1) | (old_value & Self::UPGRADEABLE_BIT_MASK);

            match self.0.compare_exchange_weak(old_value, new_value, Ordering::Acquire, Ordering::Relaxed) {
                Ok(_) => return true,
                Err(new_old_value) => old_value = new_old_value,
            }
        }
    }

    unsafe fn unlock_shared(&self) {
        let mut old_value = self.0.load(Ordering::Relaxed);
        loop {
            let old_normal_count = old_value & !Self::UPGRADEABLE_BIT_MASK;

            if cfg!(debug_assertions) && (old_value & Self::WRITE_BIT_MASK != 0) {
                panic!("BUG: RwLock reader dropped while writer was active")
            }
            let new_value = (old_normal_count - 1) | (old_value & Self::UPGRADEABLE_BIT_MASK);
            match self.0.compare_exchange_weak(old_value, new_value, Ordering::Release, Ordering::Relaxed) {
                Ok(_) => return,
                Err(new_old_value) => old_value = new_old_value,
            }
        }
    }

    fn lock_exclusive(&self) {
        while !self.try_lock_exclusive() {
            core::hint::spin_loop();
        }
    }

    fn try_lock_exclusive(&self) -> bool {
        // Exclusive access is only possible when no readers, upgradable
        // readers, or writers are present, i.e. the counter is exactly zero.
        self.0.compare_exchange_weak(0, Self::WRITE_BIT_MASK, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    unsafe fn unlock_exclusive(&self) {
        if cfg!(debug_assertions) {
            self.0.compare_exchange(Self::WRITE_BIT_MASK, 0, Ordering::Release, Ordering::Relaxed)
                .expect("BUG: RwLock writer dropped while readers were active");
        } else {
            self.0.store(0, Ordering::Release);
        }
    }
}

#[stable(feature = "kernel_core_api", since = "1.0.0")]
unsafe impl lock_api::RawRwLockDowngrade for RwCount {
    unsafe fn downgrade(&self) {
        // The single writer becomes a single plain reader.
        if cfg!(debug_assertions) {
            self.0.compare_exchange(Self::WRITE_BIT_MASK, 1, Ordering::SeqCst, Ordering::Relaxed)
                .expect("BUG: RwLock writer downgraded while readers were active");
        } else {
            self.0.store(1, Ordering::SeqCst);
        }
    }
}

#[stable(feature = "kernel_core_api", since = "1.0.0")]
unsafe impl lock_api::RawRwLockUpgrade for RwCount {
    fn lock_upgradable(&self) {
        while !self.try_lock_upgradable() {
            core::hint::spin_loop();
        }
    }

    fn try_lock_upgradable(&self) -> bool {
        let mut old_value = self.0.load(Ordering::Relaxed);

        loop {
            // At most one upgradable reader may be held, and never alongside a writer.
            if (old_value & Self::WRITE_BIT_MASK) != 0 { return false; }
            if (old_value & Self::UPGRADEABLE_BIT_MASK) != 0 { return false; }

            let new_value = old_value | Self::UPGRADEABLE_BIT_MASK;

            match self.0.compare_exchange_weak(old_value, new_value, Ordering::Acquire, Ordering::Relaxed) {
                Ok(_) => return true,
                Err(new_old_value) => old_value = new_old_value,
            }
        }
    }

    unsafe fn unlock_upgradable(&self) {
        let mut old_value = self.0.load(Ordering::Relaxed);
        loop {
            if cfg!(debug_assertions) && (old_value & Self::WRITE_BIT_MASK != 0) {
                panic!("BUG: RwLock upgradable reader dropped while writer was active")
            }
            let new_value = old_value & !Self::UPGRADEABLE_BIT_MASK;
            match self.0.compare_exchange_weak(old_value, new_value, Ordering::Release, Ordering::Relaxed) {
                Ok(_) => return,
                Err(new_old_value) => old_value = new_old_value,
            }
        }
    }

    unsafe fn upgrade(&self) {
        while !self.try_upgrade() {
            core::hint::spin_loop();
        }
    }

    unsafe fn try_upgrade(&self) -> bool {
        // The upgrade succeeds only once every plain reader has left, i.e. the
        // counter holds exactly the upgradable bit. Acquire ordering on
        // success synchronises with those readers' Release unlocks before this
        // thread starts writing.
        self.0.compare_exchange_weak(Self::UPGRADEABLE_BIT_MASK, Self::WRITE_BIT_MASK, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }
}

#[stable(feature = "kernel_core_api", since = "1.0.0")]
unsafe impl lock_api::RawRwLockUpgradeDowngrade for RwCount {
    unsafe fn downgrade_upgradable(&self) {
        let mut old_value = self.0.load(Ordering::Relaxed);
        loop {
            if cfg!(debug_assertions) && (old_value & Self::WRITE_BIT_MASK != 0) {
                panic!("BUG: RwLock upgradable reader downgraded while writer was active")
            }

            let old_normal_count = old_value & Self::READ_COUNT_MASK;
            if old_normal_count == Self::READ_COUNT_MASK { panic!("Reader count overflowed") }

            // The upgradable reader becomes one more plain reader; the upgradable bit is cleared.
            let new_value = old_normal_count + 1;
            match self.0.compare_exchange_weak(old_value, new_value, Ordering::Relaxed, Ordering::Relaxed) {
                Ok(_) => return,
                Err(new_old_value) => old_value = new_old_value,
            }
        }
    }

    unsafe fn downgrade_to_upgradable(&self) {
        if cfg!(debug_assertions) {
            self.0.compare_exchange(Self::WRITE_BIT_MASK, Self::UPGRADEABLE_BIT_MASK, Ordering::SeqCst, Ordering::Relaxed)
                .expect("BUG: RwLock writer downgraded while readers were active");
        } else {
            self.0.store(Self::UPGRADEABLE_BIT_MASK, Ordering::SeqCst);
        }
    }
}
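
// Illustrative usage sketch, not part of the original file: it exercises the
// aliases defined above through the generic `lock_api` guard methods. The
// function name and the `u32` payload are arbitrary choices for this example.
#[allow(dead_code)]
fn rwspinlock_usage_sketch() {
    let lock: RwSpinlock<u32> = RwSpinlock::new(0);

    {
        // Any number of shared readers may hold the lock at once.
        let r1 = lock.read();
        let r2 = lock.read();
        assert_eq!(*r1 + *r2, 0);
    }

    {
        // An upgradable reader coexists with plain readers and can later be
        // promoted to an exclusive writer without releasing the lock.
        let up: RwUpgradableReadGuard<'_, u32> = lock.upgradable_read();
        let mut w: RwWriteGuard<'_, u32> = RwUpgradableReadGuard::upgrade(up);
        *w += 1;

        // A writer may drop back down to a shared reader.
        let r: RwReadGuard<'_, u32> = RwWriteGuard::downgrade(w);
        assert_eq!(*r, 1);
    }
}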