Hi Frank,
Thanks for your review.
On 2026/3/4 23:48, Frank Li wrote:
On Wed, Mar 04, 2026 at 06:14:13AM +0000, Xianwei Zhao wrote:
Amlogic A9 SoCs include a general-purpose DMA controller that can be used
by multiple peripherals, such as I2C PIO and I3C. Each peripheral group
is associated with a dedicated DMA channel in hardware.
Signed-off-by: Xianwei Zhao <[email protected]>
---
drivers/dma/Kconfig | 9 +
drivers/dma/Makefile | 1 +
drivers/dma/amlogic-dma.c | 585 ++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 595 insertions(+)
...
+
+static int aml_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct aml_dma_chan *aml_chan = to_aml_dma_chan(chan);
+ struct aml_dma_dev *aml_dma = aml_chan->aml_dma;
+ size_t size = size_mul(sizeof(struct aml_dma_sg_link), DMA_MAX_LINK);
+
+ aml_chan->sg_link = dma_alloc_coherent(aml_dma->dma_device.dev, size,
+ &aml_chan->sg_link_phys,
GFP_KERNEL);
+ if (!aml_chan->sg_link)
+ return -ENOMEM;
+
+ /* offset is the same RCH_CFG and WCH_CFG */
+ regmap_update_bits(aml_dma->regmap, aml_chan->reg_offs + RCH_CFG,
CFG_CLEAR, CFG_CLEAR);
regmap_set_bits()
+ aml_chan->status = DMA_COMPLETE;
+ dma_async_tx_descriptor_init(&aml_chan->desc, chan);
+ aml_chan->desc.tx_submit = aml_dma_tx_submit;
+ regmap_update_bits(aml_dma->regmap, aml_chan->reg_offs + RCH_CFG,
CFG_CLEAR, 0);
regmap_clear_bits();
+
+ return 0;
+}
+
+static void aml_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct aml_dma_chan *aml_chan = to_aml_dma_chan(chan);
+ struct aml_dma_dev *aml_dma = aml_chan->aml_dma;
+
+ aml_chan->status = DMA_COMPLETE;
+ dma_free_coherent(aml_dma->dma_device.dev,
+ sizeof(struct aml_dma_sg_link) * DMA_MAX_LINK,
+ aml_chan->sg_link, aml_chan->sg_link_phys);
+}
+
...
+
+static struct dma_async_tx_descriptor *aml_dma_prep_slave_sg
+ (struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct aml_dma_chan *aml_chan = to_aml_dma_chan(chan);
+ struct aml_dma_dev *aml_dma = aml_chan->aml_dma;
+ struct aml_dma_sg_link *sg_link;
+ struct scatterlist *sg;
+ int idx = 0;
+ u64 paddr;
+ u32 reg, link_count, avail, chan_id;
+ u32 i;
+
+ if (aml_chan->direction != direction) {
+ dev_err(aml_dma->dma_device.dev, "direction not support\n");
+ return NULL;
+ }
+
+ switch (aml_chan->status) {
+ case DMA_IN_PROGRESS:
+ dev_err(aml_dma->dma_device.dev, "not support multi
tx_desciptor\n");
+ return NULL;
+
+ case DMA_COMPLETE:
+ aml_chan->data_len = 0;
+ chan_id = aml_chan->chan_id;
+ reg = (direction == DMA_DEV_TO_MEM) ? WCH_INT_MASK : RCH_INT_MASK;
+ regmap_update_bits(aml_dma->regmap, reg, BIT(chan_id),
BIT(chan_id));
+
+ break;
+ default:
+ dev_err(aml_dma->dma_device.dev, "status error\n");
+ return NULL;
+ }
+
+ link_count = sg_nents_for_dma(sgl, sg_len, SG_MAX_LEN);
+
+ if (link_count > DMA_MAX_LINK) {
+ dev_err(aml_dma->dma_device.dev,
+ "maximum number of sg exceeded: %d > %d\n",
+ sg_len, DMA_MAX_LINK);
+ aml_chan->status = DMA_ERROR;
+ return NULL;
+ }
+
+ aml_chan->status = DMA_IN_PROGRESS;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ avail = sg_dma_len(sg);
+ paddr = sg->dma_address;
+ while (avail > SG_MAX_LEN) {
+ sg_link = &aml_chan->sg_link[idx++];
+ /* set dma address and len to sglink*/
+ sg_link->address = paddr;
+ sg_link->ctl = FIELD_PREP(LINK_LEN, SG_MAX_LEN);
+ paddr = paddr + SG_MAX_LEN;
+ avail = avail - SG_MAX_LEN;
+ }
+ sg_link = &aml_chan->sg_link[idx++];
+ /* set dma address and len to sglink*/
+ sg_link->address = paddr;
Suggest adding dma_wmb() here to make sure the previous writes complete
before updating the OWNER bit.
Where is the OWNER bit updated to tell the DMA engine the sg_link is ready?
This DMA hardware does not have an OWNER bit.
DMA working steps:
The first step is to prepare the corresponding link memory.
(This is what the aml_dma_prep_slave_sg work involves.)
The second step is to write the link physical address and the data length
into the control registers. This will trigger the DMA to work.
For the memory-to-device channel, an additional register needs to be
written to trigger the transfer
(This part is implemented in aml_enable_dma_channel function.)
In v1 and v2 I placed dma_wmb() at the beginning of
aml_enable_dma_channel. You said it was okay not to use it, so I dropped it.
+ sg_link->ctl = FIELD_PREP(LINK_LEN, avail);
+
+ aml_chan->data_len += sg_dma_len(sg);
+ }
+ aml_chan->sg_link_cnt = idx;
+
+ return &aml_chan->desc;
+}
+
+static int aml_dma_pause_chan(struct dma_chan *chan)
+{
+ struct aml_dma_chan *aml_chan = to_aml_dma_chan(chan);
+ struct aml_dma_dev *aml_dma = aml_chan->aml_dma;
+
+ regmap_update_bits(aml_dma->regmap, aml_chan->reg_offs + RCH_CFG,
CFG_PAUSE, CFG_PAUSE);
regmap_set_bits(), check others
+ aml_chan->pre_status = aml_chan->status;
+ aml_chan->status = DMA_PAUSED;
+
+ return 0;
+}
+
...
+
+ dma_set_max_seg_size(dma_dev->dev, SG_MAX_LEN);
+
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+ dma_dev->device_alloc_chan_resources = aml_dma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = aml_dma_free_chan_resources;
+ dma_dev->device_tx_status = aml_dma_tx_status;
+ dma_dev->device_prep_slave_sg = aml_dma_prep_slave_sg;
+
+ dma_dev->device_pause = aml_dma_pause_chan;
+ dma_dev->device_resume = aml_dma_resume_chan;
align callback name, aml_dma_chan_resume()
+ dma_dev->device_terminate_all = aml_dma_terminate_all;
+ dma_dev->device_issue_pending = aml_dma_enable_chan;
aml_dma_issue_pending()
Frank
+ /* PIO 4 bytes and I2C 1 byte */
+ dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
+ dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
...
--
2.52.0