On Wed, May 07, 2025 at 10:52:37PM +0900, Alexandre Courbot wrote:
> Since we will need to allocate lots of distinct memory chunks to be
> shared between GPU and CPU, introduce a type dedicated to that. It is a
> light wrapper around CoherentAllocation.
>
> Signed-off-by: Alexandre Courbot <acour...@nvidia.com>
> ---
>  drivers/gpu/nova-core/dma.rs       | 60 ++++++++++++++++++++++++++++++++++++++
>  drivers/gpu/nova-core/nova_core.rs |  1 +
>  2 files changed, 61 insertions(+)
>
> diff --git a/drivers/gpu/nova-core/dma.rs b/drivers/gpu/nova-core/dma.rs
> new file mode 100644
> index 0000000000000000000000000000000000000000..9d90ae01d0044eaab4ddbc3eba216741d7a623ef
> --- /dev/null
> +++ b/drivers/gpu/nova-core/dma.rs
> @@ -0,0 +1,60 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +//! Simple DMA object wrapper.
> +
> +// To be removed when all code is used.
> +#![expect(dead_code)]
> +
> +use core::ops::{Deref, DerefMut};
> +
> +use kernel::device;
> +use kernel::dma::CoherentAllocation;
> +use kernel::page::PAGE_SIZE;
> +use kernel::prelude::*;
> +
> +pub(crate) struct DmaObject {
> +    dma: CoherentAllocation<u8>,
> +}
> +
> +impl DmaObject {
> +    pub(crate) fn new(dev: &device::Device<device::Bound>, len: usize) -> Result<Self> {
> +        let len = core::alloc::Layout::from_size_align(len, PAGE_SIZE)
> +            .map_err(|_| EINVAL)?
> +            .pad_to_align()
> +            .size();
> +        let dma = CoherentAllocation::alloc_coherent(dev, len, GFP_KERNEL | __GFP_ZERO)?;
> +
> +        Ok(Self { dma })
> +    }
> +
> +    pub(crate) fn from_data(dev: &device::Device<device::Bound>, data: &[u8]) -> Result<Self> {
> +        Self::new(dev, data.len()).map(|mut dma_obj| {
> +            // SAFETY:
> +            // - The copied data fits within the size of the allocated object.
> +            // - We have just created this object and there is no other user at this stage.
> +            unsafe {
> +                core::ptr::copy_nonoverlapping(
> +                    data.as_ptr(),
> +                    dma_obj.dma.start_ptr_mut(),
> +                    data.len(),
> +                );
> +            }
This will be replaced with CoherentAllocation::write(), I suppose? Please add a corresponding TODO.
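
Something along these lines is what I had in mind, i.e. a TODO right above
the copy. This is only a sketch: the exact name and signature of the future
helper are an assumption on my side, and the end of the closure (returning
dma_obj) just fills in the part of the function that got trimmed from the
quote above:

    pub(crate) fn from_data(dev: &device::Device<device::Bound>, data: &[u8]) -> Result<Self> {
        Self::new(dev, data.len()).map(|mut dma_obj| {
            // TODO: replace this manual copy with CoherentAllocation::write()
            // (or whichever helper ends up providing checked writes) once it
            // becomes available.
            //
            // SAFETY:
            // - The copied data fits within the size of the allocated object.
            // - We have just created this object and there is no other user at this stage.
            unsafe {
                core::ptr::copy_nonoverlapping(
                    data.as_ptr(),
                    dma_obj.dma.start_ptr_mut(),
                    data.len(),
                );
            }

            dma_obj
        })
    }

That way the unsafe block and its SAFETY comment can simply be dropped once
the helper lands, and the TODO makes it easy to find this spot again.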