kernel_api/memory/mapping.rs

//! API for managing memory at a high level.
//!
//! The mapping API implements a RAII-based API for managing memory maps. Each memory map owns a region of physical
//! and virtual memory, and manages the paging required to map the two together. It is also possible for only a subset
//! of the virtual memory region to be mapped to physical memory.
//!
//! Each map is built on a [`Mappable`] type, which implements the methods required to calculate how much virtual
//! memory is needed and how it maps to physical memory. This can be used to instantiate a [`RawMapping`], which
//! handles the actual mapping.
//!
//! This module exports two common flavours of memory map: [`Mapping`] and [`Stack`].
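//!
//! # Examples
//!
//! A minimal sketch of creating a four-page kernel mapping. The crate path is assumed from this
//! file's location, and the `reason` code of `0` is purely illustrative:
//!
//! ```
//! use kernel_api::memory::mapping::{Config, new_mapping};
//! use core::num::NonZero;
//!
//! // Default options: kernel address space, highmem allocator, lazily allocated, RWX
//! let config = Config::new(NonZero::new(4).unwrap());
//! let mapping = new_mapping(config, 0).expect("out of memory");
//! // Dropping the mapping unmaps it and frees both the physical and virtual memory
//! drop(mapping);
//! ```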

#![stable(feature = "kernel_mmap", since = "1.1.0")]

use core::fmt::{Debug, Formatter};
use core::mem::ManuallyDrop;
use core::num::NonZero;
use core::{mem, ptr};
use log::debug;
use crate::memory::allocator::PhysicalAllocator;
use crate::memory::{AllocError, Frame, Page};
use crate::memory::physical::{OwnedFrames, highmem};
use crate::memory::r#virtual::{AddressSpaceTy, Kernel, OwnedPages, Userspace, VirtualAllocator};
use crate::memory::r#virtual::address_space::{Weak, AddressSpace};

/// Basic operations that decide how physical and virtual memory are mapped together.
///
/// Implementations of this can be used to instantiate a [`RawMapping`].
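///
/// # Examples
///
/// A sketch of an implementor that reserves one extra, unmapped guard page on *each* side of the
/// physical region; this type is hypothetical (compare [`RawStack`], which only guards below):
///
/// ```
/// # use kernel_api::memory::mapping::Mappable;
/// # use core::num::NonZero;
/// struct DoubleGuarded;
///
/// impl Mappable for DoubleGuarded {
///     fn physical_length_to_virtual_length(&self, physical_length: NonZero<usize>) -> NonZero<usize> {
///         // One guard page below plus one above the mapped region
///         physical_length.checked_add(2).expect("virtual length overflow")
///     }
///
///     fn physical_start_offset_from_virtual(&self) -> isize {
///         // The physical memory starts one page into the virtual allocation
///         1
///     }
/// }
/// ```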
#[unstable(feature = "kernel_mmap_trait", issue = "24")]
pub trait Mappable {
	/// The number of [`Page`]s of virtual memory required to create a mapping with `physical_length` [`Frame`]s
	fn physical_length_to_virtual_length(&self, physical_length: NonZero<usize>) -> NonZero<usize>;

	/// The number of [`Page`]s to offset the physical memory into the allocated virtual memory
	fn physical_start_offset_from_virtual(&self) -> isize;
}

/// The memory protection to use for the memory mapping
#[unstable(feature = "kernel_mmap_config", issue = "24")]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Protection {
	/// The mapping is read-write and can be executed from
	RWX,
	/// The mapping is read-write, executable, and user accessible
	RWXU,
}

#[unstable(feature = "kernel_mmap_config", issue = "24")]
impl Default for Protection {
	fn default() -> Self {
		Self::RWX
	}
}

mod private {
	use crate::memory::{Frame, Page};

	#[unstable(feature = "kernel_mmap_config", issue = "24")]
	pub trait Sealed {}

	#[unstable(feature = "kernel_mmap_config", issue = "24")]
	impl Sealed for Page {}
	#[unstable(feature = "kernel_mmap_config", issue = "24")]
	impl Sealed for Frame {}
}

/// A marker trait for types that can be used as a [`Location`]
#[unstable(feature = "kernel_mmap_config", issue = "24")]
pub trait Address: private::Sealed {}
#[unstable(feature = "kernel_mmap_config", issue = "24")]
impl Address for Page {}
#[unstable(feature = "kernel_mmap_config", issue = "24")]
impl Address for Frame {}

/// The location at which to make the [mapping](self)
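///
/// A sketch of choosing locations (the alignment value is illustrative):
///
/// ```
/// # use kernel_api::memory::mapping::Location;
/// # use kernel_api::memory::{Frame, Page};
/// # use core::num::NonZero;
/// // The physical memory can go anywhere
/// let physical: Location<Frame> = Location::Any;
/// // The virtual memory must be aligned to 16 pages
/// let aligned: Location<Page> = Location::Aligned(NonZero::new(16).unwrap());
/// ```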
#[stable(feature = "kernel_mmap", since = "1.1.0")]
#[derive(Copy, Clone)]
pub enum Location<A: Address> {
	/// The mapping can go anywhere
	#[stable(feature = "kernel_mmap", since = "1.1.0")] Any,
	/// The mapping must be aligned to a specific number of [`Page`]s/[`Frame`]s
	#[unstable(feature = "kernel_mmap_config", issue = "24")] Aligned(NonZero<u32>),
	/// The mapping will fail if it cannot be allocated at this exact location
	#[stable(feature = "kernel_mmap", since = "1.1.0")] At(#[stable(feature = "kernel_mmap", since = "1.1.0")] A),
	/// The mapping must be below this location, aligned to `with_alignment` number of [`Page`]s/[`Frame`]s
	#[unstable(feature = "kernel_mmap_config", issue = "24")] Below { location: A, with_alignment: NonZero<u32> }
}

#[doc(hidden)]
#[unstable(feature = "kernel_mmap_config", issue = "24")]
impl From<Location<Frame>> for super::allocator::Location {
	fn from(value: Location<Frame>) -> Self {
		use super::allocator::{Location as XLocation, SpecificLocation};
		match value {
			Location::Any => XLocation::Any,
			Location::Aligned(a) => XLocation::Specific(SpecificLocation::Aligned(a)),
			Location::At(f) => XLocation::Specific(SpecificLocation::At(f)),
			Location::Below { location, with_alignment } => XLocation::Specific(SpecificLocation::Below { location, with_alignment }),
		}
	}
}

/// When to allocate physical memory for the [mapping](self)
#[unstable(feature = "kernel_mmap_config", issue = "24")]
pub enum Laziness { Lazy, Prefault }

/// Configuration for creating a [mapping](self)
///
/// By default, it will allocate memory anywhere that is valid, using the kernel [`AddressSpace`], and the
/// `highmem` [`physical allocator`](PhysicalAllocator). It will lazily allocate physical memory, and map it
/// with read, write and execute permissions.
#[stable(feature = "kernel_mmap", since = "1.1.0")]
pub struct Config<'physical_allocator, A: AddressSpaceTy = Kernel> {
	physical_location: Location<Frame>,
	virtual_location: Location<Page>,
	_laziness: Laziness,
	length: NonZero<usize>,
	physical_allocator: &'physical_allocator dyn PhysicalAllocator,
	address_space: A,
	protection: Protection,
}

impl Config<'static, Kernel> {
	/// Creates a new [mapping](self) configuration with default options
	///
	/// `length` is specified in pages
	///
	/// The default options are not guaranteed, but at the moment are:
	/// - physical and virtual locations: anywhere
	/// - lazily allocated
	/// - Highmem physical allocator
	/// - Kernel address space
	/// - Readable, writable and executable
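	///
	/// # Examples
	///
	/// A sketch of the builder style (the `reason` code passed to [`new_mapping`] is illustrative):
	///
	/// ```
	/// # use kernel_api::memory::mapping::{Config, Protection, new_mapping};
	/// # use core::num::NonZero;
	/// let config = Config::new(NonZero::new(8).unwrap())
	/// 		.protection(Protection::RWX);
	/// let mapping = new_mapping(config, 0).expect("out of memory");
	/// ```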
	#[stable(feature = "kernel_mmap", since = "1.1.0")]
	pub fn new(length: NonZero<usize>) -> Self {
		Config {
			physical_location: Location::Any,
			virtual_location: Location::Any,
			_laziness: Laziness::Lazy,
			length,
			physical_allocator: highmem(),
			address_space: Kernel {},
			protection: Protection::RWX,
		}
	}
}

impl Config<'static, Userspace> {
	/// Creates a new [mapping](self) configuration with default options,
	/// but in the specified address space
	///
	/// `length` is specified in pages
	///
	/// The default options are not guaranteed, but at the moment are:
	/// - physical and virtual locations: anywhere
	/// - lazily allocated
	/// - Highmem physical allocator
	/// - Readable, writable and executable
	#[stable(feature = "kernel_mmap", since = "1.1.0")]
	pub fn new_in(length: NonZero<usize>, address_space: &AddressSpace) -> Self {
		Config {
			physical_location: Location::Any,
			virtual_location: Location::Any,
			_laziness: Laziness::Lazy,
			length,
			physical_allocator: highmem(),
			address_space: Userspace(AddressSpace::downgrade(address_space)),
			protection: Protection::RWX,
		}
	}
}

impl<'physical_allocator, A: AddressSpaceTy> Config<'physical_allocator, A> {
	/// Set the physical allocator to use
	///
	/// This is used for both the underlying memory and any page tables that need creating
	#[unstable(feature = "kernel_mmap_config", issue = "24")]
	pub fn physical_allocator<'a>(self, allocator: &'a dyn PhysicalAllocator) -> Config<'a, A> {
		Config {
			physical_allocator: allocator,
			..self
		}
	}

	/// Set the memory protection used when mapping pages
	#[unstable(feature = "kernel_mmap_config", issue = "24")]
	pub fn protection(self, protection: Protection) -> Self {
		Config {
			protection,
			..self
		}
	}

	/// Set the physical location of the low address of the mapping
	#[stable(feature = "kernel_mmap", since = "1.1.0")]
	pub fn physical_location(self, location: Location<Frame>) -> Self {
		Config {
			physical_location: location,
			..self
		}
	}

	/// Set the virtual location of the low address of the mapping
	///
	/// This is currently ignored
	#[unstable(feature = "kernel_mmap_config", issue = "24")]
	pub fn virtual_location(self, location: Location<Page>) -> Self {
		Config {
			virtual_location: location,
			..self
		}
	}
}

/// Used to track whether the memory underlying the mapping is contiguous
pub(super) enum RawMappingContiguity {
	/// The underlying physical memory is contiguous, and starts at the contained frame
	Contiguous(Frame),

	/// The underlying physical memory is discontiguous, but was all allocated by the same allocator
	Discontiguous,
}

/// Returned from [`RawMapping::into_contiguous_raw_parts()`] if the underlying physical memory is not contiguous
///
/// See the documentation for [`into_contiguous_raw_parts()`](RawMapping::into_contiguous_raw_parts()) for more information.
#[derive(Debug)]
#[unstable(feature = "kernel_mmap_to_parts", issue = "24")]
pub struct DiscontiguityError(());

/// The raw type underlying all memory mappings.
///
/// This will allocate any required memory when created, and register any lazily mapped memory as such.
/// It will also manage the page tables to correctly unmap the memory when dropped.
#[stable(feature = "kernel_mmap", since = "1.1.0")]
pub struct RawMapping<'phys_allocator, R: Mappable, A: AddressSpaceTy = Kernel> {
	raw: R,

	/// The address space mapped into
	address_space: ManuallyDrop<A>,

	/// Whether the underlying physical memory is contiguous or not
	contiguity: RawMappingContiguity,

	/// The first page in the mapping that is mapped to physical memory.
	/// The region of virtual memory from `virtual_valid_start` to `virtual_valid_start + physical_length` is mapped.
	virtual_valid_start: Page,

	/// The number of physical pages allocated to the mapping
	physical_len: NonZero<usize>,

	/// The physical allocator used for memory allocation
	allocator: &'phys_allocator dyn PhysicalAllocator,

	/// The protection used when mapping pages into this mapping
	protection: Protection,
}

#[stable(feature = "kernel_mmap", since = "1.1.0")]
impl<R: Mappable, A: AddressSpaceTy> Debug for RawMapping<'_, R, A> {
	fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
		f.debug_struct("RawMapping")
		 .field(
			 "physical_base",
			 match self.physical_start() {
				 Ok(ref frame) => frame,
				 Err(_) => &"<discontiguous>",
			 }
		 )
		 .field("physical_length", &self.physical_len().get())
		 .field("virtual_base", &self.virtual_start())
		 .field("virtual_valid_start", &self.virtual_valid_start())
		 .field("address_space", &"<address space>")
		 .field("protection", &self.protection)
		 .finish()
	}
}

impl<'phys_alloc, R: Mappable> RawMapping<'phys_alloc, R, Kernel> {
	/// Create a new memory mapping with the given configuration
	///
	/// All physical memory used for the initial allocation will be contiguous.
	/// This may change in future.
	///
	/// # Errors
	///
	/// If the required physical or virtual memory could not be allocated, [`AllocError`] is returned.
	///
	/// # Panics
	///
	/// If the page tables already contain a mapping for the newly allocated virtual memory.
	#[stable(feature = "kernel_mmap", since = "1.1.0")]
	pub fn new(config: Config<'phys_alloc, Kernel>, reason: u16, raw: R) -> Result<Self, AllocError> {
		let Location::Any = config.virtual_location else { todo!() };

		let virtual_len = raw.physical_length_to_virtual_length(config.length);
		let virtual_mem = OwnedPages::new(virtual_len)?;

		Self::new_at(config, reason, virtual_mem, raw)
	}
}

impl<'phys_alloc, R: Mappable> RawMapping<'phys_alloc, R, Userspace> {
	/// Create a new memory mapping with the given configuration
	///
	/// All physical memory used for the initial allocation will be contiguous.
	/// This may change in future.
	///
	/// # Errors
	///
	/// If the required physical or virtual memory could not be allocated, [`AllocError`] is returned.
	///
	/// # Panics
	///
	/// If the page tables already contain a mapping for the newly allocated virtual memory.
	#[stable(feature = "kernel_mmap", since = "1.1.0")]
	pub fn new_in(config: Config<'phys_alloc, Userspace>, reason: u16, raw: R) -> Result<Self, AllocError> {
		let Some(address_space) = Weak::upgrade(&config.address_space.0) else { return Err(AllocError) };
		let virtual_len = raw.physical_length_to_virtual_length(config.length);
		let virtual_mem = OwnedPages::xnew(virtual_len, &address_space, config.virtual_location)?;

		Self::new_at(config, reason, virtual_mem, raw)
	}
}

impl<'phys_alloc, R: Mappable, A: AddressSpaceTy> RawMapping<'phys_alloc, R, A> {
	fn new_at(config: Config<'phys_alloc, A>, reason: u16, virtual_mem: OwnedPages<A>, raw: R) -> Result<Self, AllocError> {
		let Config {
			length: physical_len,
			physical_allocator,
			physical_location,
			protection,
			..
		} = config;

		let physical_mem = OwnedFrames::xnew(physical_len, physical_allocator, physical_location.into())?;

		let (physical_base, _, _) = physical_mem.into_raw_parts();
		let (virtual_base, _, address_space) = virtual_mem.into_raw_parts();
		let offset_base = virtual_base + raw.physical_start_offset_from_virtual();

		// TODO: huge pages
		// FIXME: memory leak of physical and virtual memory if this fails
		let mut page_table = address_space.get_page_table();
		for (frame, page) in (0..physical_len.get()).map(|i| (physical_base + i, offset_base + i)) {
			A::map_page(&mut page_table, page, frame, reason, protection)
					.expect("Virtual memory uniquely owned by the allocation so should not be mapped in this address space");
		}
		drop(page_table);

		Ok(Self {
			raw,
			address_space: ManuallyDrop::new(address_space),
			contiguity: RawMappingContiguity::Contiguous(physical_base),
			virtual_valid_start: offset_base,
			physical_len,
			allocator: physical_allocator,
			protection,
		})
	}

	/// Destructure into the underlying [`OwnedFrames`] and [`OwnedPages`] that back the allocation
	///
	/// Depending on the implementation of [`Mappable`] used, these may have different lengths.
	/// These can be turned back into a [`RawMapping`] by calling [`from_contiguous_raw_parts()`](Self::from_contiguous_raw_parts).
	///
	/// # Errors
	///
	/// If the underlying physical memory is not contiguous, and so cannot be represented as a single instance
	/// of [`OwnedFrames`], [`DiscontiguityError`] is returned.
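	///
	/// # Examples
	///
	/// A sketch of a round trip through the raw parts (the `reason` code is illustrative):
	///
	/// ```
	/// # use kernel_api::memory::mapping::{Config, RawMapping, new_mapping};
	/// # use core::num::NonZero;
	/// let mapping = new_mapping(Config::new(NonZero::new(2).unwrap()), 0).unwrap();
	/// let (frames, pages, protection, raw) = mapping.into_contiguous_raw_parts().unwrap();
	/// // SAFETY: the parts come from `into_contiguous_raw_parts` and are unmodified
	/// let mapping = unsafe { RawMapping::from_contiguous_raw_parts(frames, pages, protection, raw) };
	/// ```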
	#[unstable(feature = "kernel_mmap_to_parts", issue = "24")]
	pub fn into_contiguous_raw_parts(mut self) -> Result<(OwnedFrames<'phys_alloc>, OwnedPages<A>, Protection, R), DiscontiguityError> {
		let frames = unsafe {
			let RawMappingContiguity::Contiguous(base_frame) = self.contiguity else {
				return Err(DiscontiguityError(()));
			};

			OwnedFrames::from_raw_parts(
				base_frame,
				self.physical_len(),
				self.allocator,
			)
		};

		let address_space = unsafe { ManuallyDrop::take(&mut self.address_space) };
		let pages = unsafe {
			OwnedPages::from_raw_parts(
				self.virtual_start(),
				self.virtual_len(),
				address_space,
			)
		};

		let this = ManuallyDrop::new(self);
		Ok((frames, pages, this.protection, unsafe { ptr::read(&this.raw) }))
	}

	/// Recreates a [`RawMapping`] from its raw parts
	///
	/// # Safety
	///
	/// The parts must have come from a previous call to
	/// [`into_contiguous_raw_parts()`](Self::into_contiguous_raw_parts), and the mapping between
	/// `frames` and `pages` must not have been modified since.
	#[unstable(feature = "kernel_mmap_to_parts", issue = "24")]
	pub unsafe fn from_contiguous_raw_parts(frames: OwnedFrames<'phys_alloc>, pages: OwnedPages<A>, protection: Protection, raw: R) -> Self {
		let (virtual_base, actual_vlen, address_space) = pages.into_raw_parts();
		let (physical_base, physical_len, physical_allocator) = frames.into_raw_parts();
		let correct_vlen = raw.physical_length_to_virtual_length(physical_len);
		debug_assert_eq!(actual_vlen, correct_vlen);

		Self {
			virtual_valid_start: virtual_base + raw.physical_start_offset_from_virtual(),
			raw,
			address_space: ManuallyDrop::new(address_space),
			contiguity: RawMappingContiguity::Contiguous(physical_base),
			physical_len,
			allocator: physical_allocator,
			protection,
		}
	}

	fn virtual_len(&self) -> NonZero<usize> {
		self.raw.physical_length_to_virtual_length(self.physical_len())
	}

	/// The first [`Page`] of the virtual memory region owned by the mapping
	#[unstable(feature = "kernel_mmap_to_parts", issue = "24")]
	pub fn virtual_start(&self) -> Page {
		self.virtual_valid_start() - self.raw.physical_start_offset_from_virtual()
	}

	fn virtual_valid_start(&self) -> Page {
		self.virtual_valid_start
	}

	/// The [`Page`] one past the end of the virtual memory region owned by the mapping
	#[unstable(feature = "kernel_mmap_to_parts", issue = "24")]
	pub fn virtual_end(&self) -> Page {
		self.virtual_start() + self.virtual_len().get()
	}

	/// The number of [`Frame`]s of physical memory backing the mapping
	#[unstable(feature = "kernel_mmap_to_parts", issue = "24")]
	pub fn physical_len(&self) -> NonZero<usize> {
		self.physical_len
	}

	/// The first [`Frame`] of physical memory, if the backing memory is contiguous
	#[unstable(feature = "kernel_mmap_to_parts", issue = "24")]
	pub fn physical_start(&self) -> Result<Frame, DiscontiguityError> {
		match self.contiguity {
			RawMappingContiguity::Contiguous(base_frame) => Ok(base_frame),
			RawMappingContiguity::Discontiguous => Err(DiscontiguityError(())),
		}
	}

	/// The [`Frame`] one past the end of the physical memory, if the backing memory is contiguous
	#[unstable(feature = "kernel_mmap_to_parts", issue = "24")]
	pub fn physical_end(&self) -> Result<Frame, DiscontiguityError> {
		match self.contiguity {
			RawMappingContiguity::Contiguous(base_frame) => Ok(base_frame + self.physical_len().get()),
			RawMappingContiguity::Discontiguous => Err(DiscontiguityError(())),
		}
	}

	/// The [`Protection`] used when mapping pages into this mapping
	#[unstable(feature = "kernel_mmap_to_parts", issue = "24")]
	#[inline]
	pub fn protection(&self) -> Protection {
		self.protection
	}

	/// Attempts to resize the allocation to `new_len` without moving the allocation
	///
	/// If the allocation could be resized, returns the [`Page`] corresponding to the previous end of the mapping.
	/// If it could not be resized, returns the [`AllocError`] from the underlying allocators.
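	///
	/// # Examples
	///
	/// A sketch of growing a mapping by two pages (the `reason` code is illustrative):
	///
	/// ```
	/// # use kernel_api::memory::mapping::{Config, new_mapping};
	/// # use core::num::NonZero;
	/// let mut mapping = new_mapping(Config::new(NonZero::new(4).unwrap()), 0).unwrap();
	/// let old_end = mapping.resize_in_place(NonZero::new(6).unwrap()).unwrap();
	/// assert_eq!(mapping.virtual_end(), old_end + 2);
	/// ```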
	#[stable(feature = "kernel_mmap", since = "1.1.0")]
	pub fn resize_in_place(&mut self, new_len: NonZero<usize>) -> Result<Page, AllocError> {
		if new_len == self.physical_len() { return Ok(self.virtual_end()); }

		let original_physical_allocator = self.allocator;

		if new_len < self.physical_len() {
			todo!("actually free and unmap the extra memory")
		} else {
			let extra_len: NonZero<usize> = new_len.get().checked_sub(self.physical_len().get())
					.expect("`new_len` is checked to be greater than `physical_len`")
					.try_into()
					.expect("`new_len` is checked to be not equal to `physical_len`");

			let extra_virtual_mem = self.address_space.allocate_contiguous_at(self.virtual_end(), extra_len.get())?; // FIXME: use OwnedPages

			debug_assert_eq!(self.virtual_end(), extra_virtual_mem);

			// TODO: huge pages
			let mut page_table = self.address_space.get_page_table();

			let extra_physical_mem = OwnedFrames::xnew(extra_len, original_physical_allocator, super::allocator::Location::Any)?;
			let (new_start_frame, _, _) = extra_physical_mem.into_raw_parts();

			// The mapping only stays contiguous if the new frames directly follow the existing ones
			if let RawMappingContiguity::Contiguous(base_frame) = self.contiguity {
				if base_frame + self.physical_len.get() != new_start_frame { self.contiguity = RawMappingContiguity::Discontiguous; }
			}

			// FIXME: memory leak of physical and virtual memory if this fails
			// FIXME: this is probably wrong if the extra unmapped virtual memory is after the physical memory, not before
			// FIXME: use the caller-provided `reason` rather than a hardcoded one
			for (frame, page) in (0..extra_len.get()).map(|i| (new_start_frame + i, extra_virtual_mem + i)) {
				A::map_page(&mut page_table, page, frame, 25, self.protection)
						.expect("Virtual memory uniquely owned by the mapping so should not already be mapped");
			}

			self.physical_len = new_len;
			Ok(extra_virtual_mem)
		}
	}
}

#[stable(feature = "kernel_mmap", since = "1.1.0")]
impl<R: Mappable, A: AddressSpaceTy> Drop for RawMapping<'_, R, A> {
	fn drop(&mut self) {
		debug!("mmap dropped: {self:x?}");

		let mut page_table = self.address_space.get_page_table();

		// If the underlying memory is discontiguous, we need to find the physical memory chunks via the page tables
		// and deallocate them before unmapping. To reduce the number of allocator calls, we merge contiguous chunks
		// together. Since the concept of a single 'allocation' does not exist in the physical allocator, this is
		// allowed regardless of whether the chunks were allocated in one go.
		// If the memory is contiguous, we deallocate it in one go by converting it to an OwnedFrames object and
		// immediately dropping it.
		match self.contiguity {
			RawMappingContiguity::Contiguous(base_frame) => {
				let _frames = unsafe { OwnedFrames::from_raw_parts(
					base_frame,
					self.physical_len(),
					self.allocator,
				) };
			},
			RawMappingContiguity::Discontiguous => {
				let page_iter = (0..self.physical_len().get()).map(|i| self.virtual_valid_start() + i);
				let frame_iter = page_iter.map(|page| {
					A::translate_page(&mut page_table, page)
							.expect("Virtual memory uniquely owned by this mmap so shouldn't be unmapped")
				});

				let drop_frame_range = |(frame, len)| {
					let _frames = unsafe { OwnedFrames::from_raw_parts(
						frame,
						len,
						self.allocator,
					) };
				};

				// Group the frames into contiguous chunks
				// First convert into a tuple of `(start, len)`, where each is of len 1
				// Then reduce: if the end of one frame range is the start of the next,
				// combine into a range with the new length; otherwise, take the already
				// combined chunks, and deallocate in one go
				let last_chunk = frame_iter.map(|frame| (frame, NonZero::<usize>::new(1).unwrap()))
						.reduce(|prev_group, new_group| {
							if prev_group.0 + prev_group.1.get() == new_group.0
								&& let Some(combined_len) = prev_group.1.checked_add(new_group.1.get()) { // If the length overflows, just drop in two chunks
								// contiguous, so merge
								(prev_group.0, combined_len)
							} else {
								// discontiguous, so drop the existing set
								drop_frame_range(prev_group);
								new_group
							}
						});
				if let Some(last_chunk) = last_chunk {
					drop_frame_range(last_chunk);
				}
			},
		}

		for page in (0..self.physical_len().get()).map(|i| self.virtual_valid_start() + i) {
			debug!("unmapping page {page:x?}");
			A::unmap_page(&mut page_table, page)
					.expect("Virtual memory uniquely owned by this mmap so shouldn't be unmapped");
		}

		drop(page_table);
		let address_space = unsafe { ManuallyDrop::take(&mut self.address_space) };
		let _pages = unsafe {
			OwnedPages::from_raw_parts(
				self.virtual_start(),
				self.virtual_len(),
				address_space,
			)
		};
	}
}

#[doc(hidden)]
#[stable(feature = "kernel_mmap", since = "1.1.0")]
pub struct RawMmap;

#[stable(feature = "kernel_mmap", since = "1.1.0")]
impl Mappable for RawMmap {
	fn physical_length_to_virtual_length(&self, physical_length: NonZero<usize>) -> NonZero<usize> { physical_length }
	fn physical_start_offset_from_virtual(&self) -> isize { 0 }
}

#[doc(hidden)]
#[stable(feature = "kernel_mmap", since = "1.1.0")]
pub struct RawStack;

#[stable(feature = "kernel_mmap", since = "1.1.0")]
impl Mappable for RawStack {
	fn physical_length_to_virtual_length(&self, physical_length: NonZero<usize>) -> NonZero<usize> {
		physical_length.checked_add(1).expect("Stack size overflow")
	}
	fn physical_start_offset_from_virtual(&self) -> isize { 1 }
}

/// A RAII memory mapping
///
/// Manages a mapping directly between physical and virtual memory.
#[allow(type_alias_bounds)] // makes docs nicer
#[stable(feature = "kernel_mmap", since = "1.1.0")]
pub type Mapping<'phys_alloc, A: AddressSpaceTy = Kernel> = RawMapping<'phys_alloc, RawMmap, A>;

/// Creates a new [`Mapping`] in the kernel address space, with the given configuration
#[stable(feature = "kernel_mmap", since = "1.1.0")]
pub fn new_mapping(config: Config<'_, Kernel>, reason: u16) -> Result<Mapping<'_, Kernel>, AllocError> {
	Mapping::new(config, reason, RawMmap {})
}

/// Creates a new [`Mapping`] in the address space referred to by `config`
#[stable(feature = "kernel_mmap", since = "1.1.0")]
pub fn new_mapping_in(config: Config<'_, Userspace>, reason: u16) -> Result<Mapping<'_, Userspace>, AllocError> {
	Mapping::new_in(config, reason, RawMmap {})
}

/// A RAII stack
///
/// Manages the memory map for a stack, including a guard page below the stack.
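///
/// A sketch of allocating a 16-page kernel stack (the `reason` code is illustrative):
///
/// ```
/// # use kernel_api::memory::mapping::{Config, new_stack};
/// # use core::num::NonZero;
/// let stack = new_stack(Config::new(NonZero::new(16).unwrap()), 0).expect("out of memory");
/// // One extra, unmapped guard page sits below the 16 mapped pages
/// let top_of_stack = stack.virtual_end();
/// ```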
#[allow(type_alias_bounds)] // makes docs nicer
#[stable(feature = "kernel_mmap", since = "1.1.0")]
pub type Stack<'phys_alloc, A: AddressSpaceTy = Kernel> = RawMapping<'phys_alloc, RawStack, A>;

/// Creates a new [`Stack`] in the kernel address space, with the given configuration
#[stable(feature = "kernel_mmap", since = "1.1.0")]
pub fn new_stack(config: Config<'_, Kernel>, reason: u16) -> Result<Stack<'_, Kernel>, AllocError> {
	Stack::new(config, reason, RawStack {})
}

/// Creates a new [`Stack`] in the address space referred to by `config`
#[stable(feature = "kernel_mmap", since = "1.1.0")]
pub fn new_stack_in(config: Config<'_, Userspace>, reason: u16) -> Result<Stack<'_, Userspace>, AllocError> {
	Stack::new_in(config, reason, RawStack {})
}

/// A type-erased [`Mappable`] backed by plain function pointers
#[unstable(feature = "kernel_internals", issue = "none")]
pub struct DynMapping {
	physical_length_to_virtual_length: fn(*mut u8, physical_length: NonZero<usize>) -> NonZero<usize>,
	physical_start_offset_from_virtual: fn(*mut u8) -> isize,
}

impl DynMapping {
	/// Type-erases the [`Mappable`] implementation of a [`RawMapping`]
	///
	/// The `where` clause evaluates `1 / 0` at compile time unless `R` is a ZST, so only zero-sized
	/// [`Mappable`]s can be coerced: the erased function pointers are later called with a dangling receiver.
	#[unstable(feature = "kernel_internals", issue = "none")]
	pub fn coerce<R: Mappable, A: AddressSpaceTy>(from: RawMapping<R, A>) -> RawMapping<DynMapping, A> where [(); 1 / ((size_of::<R>() == 0) as usize)]: {
		let from = ManuallyDrop::new(from);

		RawMapping {
			raw: DynMapping {
				physical_length_to_virtual_length: unsafe { mem::transmute(R::physical_length_to_virtual_length as fn(&R, physical_length: NonZero<usize>) -> NonZero<usize>) },
				physical_start_offset_from_virtual: unsafe { mem::transmute(R::physical_start_offset_from_virtual as fn(&R) -> isize) },
			},
			address_space: unsafe { ptr::read(&from.address_space) },
			contiguity: unsafe { ptr::read(&from.contiguity) },
			virtual_valid_start: from.virtual_valid_start,
			physical_len: from.physical_len,
			allocator: from.allocator,
			protection: from.protection,
		}
	}
}

#[unstable(feature = "kernel_internals", issue = "none")]
impl Mappable for DynMapping {
	fn physical_length_to_virtual_length(&self, physical_length: NonZero<usize>) -> NonZero<usize> {
		(self.physical_length_to_virtual_length)(ptr::dangling_mut(), physical_length)
	}

	fn physical_start_offset_from_virtual(&self) -> isize {
		(self.physical_start_offset_from_virtual)(ptr::dangling_mut())
	}
}