kernel_api/sync/mutex.rs

use core::mem::ManuallyDrop;
use core::sync::atomic::{AtomicU8, AtomicUsize, Ordering};

/// A mutual exclusion primitive useful for protecting shared data
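///
/// # Examples
///
/// A minimal usage sketch (assuming this alias is exposed as
/// `kernel_api::sync::Spinlock`):
///
/// ```no_run
/// use kernel_api::sync::Spinlock;
///
/// let data = Spinlock::new(0u32);
///
/// // Locking disables interrupts; dropping the guard unlocks the spinlock
/// // and restores the previous interrupt state.
/// *data.lock() += 1;
/// assert_eq!(*data.lock(), 1);
/// ```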
#[stable(feature = "kernel_core_api", since = "1.0.0")]
pub type Spinlock<T: ?Sized> = lock_api::Mutex<RawSpinlock, T>;

/// An RAII implementation of a “scoped lock” of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
#[stable(feature = "kernel_core_api", since = "1.0.0")]
pub type SpinlockGuard<'a, T: ?Sized> = lock_api::MutexGuard<'a, RawSpinlock, T>;

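/// Extension methods for [`SpinlockGuard`]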
#[stable(feature = "kernel_core_api", since = "1.0.0")]
pub trait SpinlockGuardExt {
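    /// Consumes the guard and releases the lock *without* restoring the
    /// interrupt state saved when it was acquired, leaving interrupts disabled
    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming these items are re-exported from
    /// `kernel_api::sync`):
    ///
    /// ```no_run
    /// use kernel_api::sync::{Spinlock, SpinlockGuard, SpinlockGuardExt};
    ///
    /// let lock = Spinlock::new(());
    /// let guard = lock.lock();
    /// // Release the lock but leave interrupts disabled.
    /// SpinlockGuard::unlock_no_interrupts(guard);
    /// ```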
    #[stable(feature = "kernel_core_api", since = "1.0.0")]
    fn unlock_no_interrupts(this: Self);
}

#[stable(feature = "kernel_core_api", since = "1.0.0")]
impl<T> SpinlockGuardExt for SpinlockGuard<'_, T> {
    fn unlock_no_interrupts(this: Self) {
        // Prevent the guard's `Drop` impl from running, since it would unlock
        // the mutex again and restore the saved interrupt state
        let this = ManuallyDrop::new(this);
        unsafe {
            let spinlock = Self::mutex(&this).raw();
            spinlock.unlock_no_interrupts();
        }
    }
}

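/// The possible states of a [`RawSpinlock`], encoded as a `u8` so they can be
/// stored in an [`AtomicU8`]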
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum State {
    Unlocked,
    Locked,
}

impl State {
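    // `const` counterparts of the `From`/`TryFrom` impls below: trait methods
    // cannot be called in `const` contexts, but `RawMutex::INIT` must encode
    // the unlocked state at compile time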
    const fn const_into_u8(self) -> u8 {
        match self {
            State::Unlocked => 0,
            State::Locked => 1,
        }
    }

    const fn const_from_u8(value: u8) -> Result<Self, ()> {
        match value {
            0 => Ok(State::Unlocked),
            1 => Ok(State::Locked),
            _ => Err(()),
        }
    }
}

#[stable(feature = "kernel_core_api", since = "1.0.0")]
impl From<State> for u8 {
    fn from(value: State) -> Self {
        value.const_into_u8()
    }
}

#[stable(feature = "kernel_core_api", since = "1.0.0")]
impl TryFrom<u8> for State {
    type Error = ();

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        Self::const_from_u8(value)
    }
}

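/// The raw spinlock type behind [`Spinlock`]
///
/// Acquiring the lock disables interrupts on the current core, and the
/// previous interrupt state is restored when the lock is released.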
#[stable(feature = "kernel_core_api", since = "1.0.0")]
pub struct RawSpinlock {
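    /// The current [`State`], encoded as a `u8`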
    state: AtomicU8,
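    /// The interrupt state saved by `lock`, restored by `unlock`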
    irq_state: AtomicUsize,
}

#[stable(feature = "kernel_core_api", since = "1.0.0")]
unsafe impl Send for RawSpinlock {}

#[stable(feature = "kernel_core_api", since = "1.0.0")]
unsafe impl Sync for RawSpinlock {}

impl RawSpinlock {
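    /// Releases the lock without restoring the saved interrupt state
    ///
    /// # Safety
    ///
    /// The lock must currently be held.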
    unsafe fn unlock_no_interrupts(&self) {
        let old_state = self.state.swap(State::Unlocked.into(), Ordering::Release);
        let old_state = State::try_from(old_state).expect("Spinlock in undefined state");

        match old_state {
            State::Unlocked => unreachable!("Mutex was unlocked while unlocked"),
            State::Locked => {},
        }
    }
}

#[stable(feature = "kernel_core_api", since = "1.0.0")]
unsafe impl lock_api::RawMutex for RawSpinlock {
    const INIT: Self = Self {
        state: AtomicU8::new(State::Unlocked.const_into_u8()),
        irq_state: AtomicUsize::new(0),
    };

    type GuardMarker = lock_api::GuardNoSend; // Dropping the guard on another core would restore the interrupt state on the wrong core

    fn lock(&self) {
        // Save the previous interrupt state and disable interrupts for the
        // duration of the critical section
        let irq_state = unsafe { crate::bridge::hal::__popcorn_disable_irq() };

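        // Spin until the state transitions from `Unlocked` to `Locked`; the
        // weak compare-exchange may fail spuriously, costing one extra iteration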
        while self.state.compare_exchange_weak(
            State::Unlocked.into(),
            State::Locked.into(),
            Ordering::Acquire,
            Ordering::Relaxed
        ).is_err() {
            core::hint::spin_loop();
        }

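        // The lock is now held, so no other core can race on `irq_state`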
        self.irq_state.store(irq_state, Ordering::Relaxed);
    }

    fn try_lock(&self) -> bool {
        let irq_state = unsafe { crate::bridge::hal::__popcorn_disable_irq() };
        let success = self.state.compare_exchange(
            State::Unlocked.into(),
            State::Locked.into(),
            Ordering::Acquire,
            Ordering::Relaxed
        ).is_ok();

        if success {
            self.irq_state.store(irq_state, Ordering::Relaxed);
        } else {
            // The lock was not acquired, so restore the interrupt state immediately
            unsafe { crate::bridge::hal::__popcorn_set_irq(irq_state) }
        }

        success
    }

    unsafe fn unlock(&self) {
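        // Read the saved interrupt state *before* releasing the lock; once it
        // is released, another core could acquire it and overwrite the value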
        let old_irq_state = self.irq_state.load(Ordering::Relaxed);
        let old_state = self.state.swap(State::Unlocked.into(), Ordering::Release);
        let old_state = State::try_from(old_state).expect("Spinlock in undefined state");

        match old_state {
            State::Unlocked => unreachable!("Mutex was unlocked while unlocked"),
            State::Locked => unsafe { crate::bridge::hal::__popcorn_set_irq(old_irq_state) },
        }
    }
}

/*
fn enable_irq() {
    #[cfg(target_arch = "x86_64")]
    unsafe { asm!("sti", options(preserves_flags, nomem)); }

    // FIXME: these flags should be the same as when interrupts were disabled
    // `DAIFClr` clears the D/A/I/F mask bits, unmasking (enabling) interrupts
    #[cfg(target_arch = "aarch64")]
    unsafe { asm!("msr DAIFClr, #0b1111"); }
}

/// Returns whether interrupts were enabled before disablement
fn disable_irq() -> bool {
    #[cfg(target_arch = "x86_64")]
    fn disable() -> bool {
        let flags: u64;
        unsafe {
            asm!(
                "pushf",
                "pop {}",
                "cli",
                out(reg) flags, options(preserves_flags, nomem)
            )
        }

        // IF is bit 9 of RFLAGS; set means interrupts were enabled
        (flags & 0x0200) != 0
    }

    #[cfg(target_arch = "aarch64")]
    fn disable() -> bool {
        let daif: u64;
        unsafe {
            asm!(
                "mrs {}, DAIF",
                // `DAIFSet` sets the mask bits, masking (disabling) interrupts
                "msr DAIFSet, #0b1111",
                out(reg) daif
            )
        }

        // The D/A/I/F mask bits live at DAIF[9:6]; all clear means enabled
        ((daif >> 6) & 0b1111) == 0
    }

    disable()
}*/