The DONE_INT_MASK and ABORT_INT_MASK registers are shared by all DMA
channels, so updating them requires a read-modify-write sequence.
Because that sequence is not atomic, concurrent calls to
dw_edma_v0_core_start() race when two channels update these registers
at the same time, and one channel's update can be lost.
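
To make the failure mode concrete, here is a rough stand-alone
user-space sketch of the lost-update race (not the driver code: a
pthread mutex stands in for the raw spinlock, a plain uint32_t for the
memory-mapped mask register, and the usleep() only widens the race
window for demonstration):

/*
 * Stand-alone illustration of the non-atomic read-modify-write race.
 * Without serialization both workers read the same initial mask value,
 * so the write that lands second discards the other channel's update.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t int_mask = 0xFFFFFFFF;		/* all channels masked */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int serialized;				/* set to 1 to apply the fix */

static void unmask_channel(unsigned int id)
{
	uint32_t tmp;

	if (serialized)
		pthread_mutex_lock(&lock);

	tmp = int_mask;			/* read */
	usleep(1000);			/* widen the race window */
	tmp &= ~(1U << id);		/* modify */
	int_mask = tmp;			/* write */

	if (serialized)
		pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	unmask_channel((unsigned int)(uintptr_t)arg);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)0UL);
	pthread_create(&b, NULL, worker, (void *)1UL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Serialized result is 0xFFFFFFFC; unserialized, one bit stays set. */
	printf("int_mask = 0x%08x\n", (unsigned int)int_mask);
	return 0;
}

Built with "gcc -pthread", this prints 0xFFFFFFFE or 0xFFFFFFFD instead
of 0xFFFFFFFC; flipping serialized to 1 mirrors what taking dw->lock
around the sequence does in the driver.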
Add a spinlock to serialize access to these registers and prevent race
conditions.

Signed-off-by: Frank Li <[email protected]>
---
 drivers/dma/dw-edma/dw-edma-v0-core.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index b75fdaffad9a4ea6cd8d15e8f43bea550848b46c..2850a9df80f54d00789144415ed2dfe31dea3965 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -364,6 +364,7 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 {
 	struct dw_edma_chan *chan = chunk->chan;
 	struct dw_edma *dw = chan->dw;
+	unsigned long flags;
 	u32 tmp;
 
 	dw_edma_v0_core_write_chunk(chunk);
@@ -408,6 +409,8 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 		}
 	}
 	/* Interrupt unmask - done, abort */
+	raw_spin_lock_irqsave(&dw->lock, flags);
+
 	tmp = GET_RW_32(dw, chan->dir, int_mask);
 	tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
 	tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
@@ -416,6 +419,9 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 	tmp = GET_RW_32(dw, chan->dir, linked_list_err_en);
 	tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
 	SET_RW_32(dw, chan->dir, linked_list_err_en, tmp);
+
+	raw_spin_unlock_irqrestore(&dw->lock, flags);
+
 	/* Channel control */
 	SET_CH_32(dw, chan->dir, chan->id, ch_control1,
 		  (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));

-- 
2.34.1
