Re: [RESEND PATCH v6 13/14] iommu/rockchip: Add runtime PM support
Hi Tomasz, Thanks for your reply. On 03/05/2018 09:49 PM, Tomasz Figa wrote: > struct rk_iommudata { >+ struct device_link *link; /* runtime PM link from IOMMU to master */ Kerneldoc comment for the struct could be added instead. I saw this in the kerneldoc: * An MMU device exists alongside a busmaster device, both are in the same power domain. The MMU implements DMA address translation for the busmaster device and shall be runtime resumed and kept active whenever and as long as the busmaster device is active. The busmaster device's driver shall not bind before the MMU is bound. To achieve this, a device link with runtime PM integration is added from the busmaster device (consumer) to the MMU device (supplier). The effect with regards to runtime PM is the same as if the MMU was the parent of the master device. Maybe we can use something like: device link with runtime PM integration from the master (consumer) to the IOMMU (supplier). > struct rk_iommu *iommu; > }; > >@@ -518,7 +520,12 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) > u32 int_status; > dma_addr_t iova; > irqreturn_t ret = IRQ_NONE; >- int i; >+ int i, err, need_runtime_put; nit: need_runtime_put could be a bool. ok >+ >+ err = pm_runtime_get_if_in_use(iommu->dev); >+ if (err <= 0 && err != -EINVAL) >+ return ret; >+ need_runtime_put = err > 0; Generally something must be really wrong if we end up with err == 0 here, because the IOMMU must be powered on to signal an interrupt. The only case this could happen would be if the IRQ signal was shared with some device from another power domain. Is it possible on Rockchip SoCs? If not, perhaps we should have a WARN_ON() here for such case. The irq could be shared between master and IOMMU, but always from the same power domain, I think. 
will add a WARN_ON() > > WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); > >@@ -570,6 +577,9 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) > > clk_bulk_disable(iommu->num_clocks, iommu->clocks); > >+ if (need_runtime_put) >+ pm_runtime_put(iommu->dev); >+ > return ret; > } > >@@ -611,10 +621,20 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, > spin_lock_irqsave(&rk_domain->iommus_lock, flags); > list_for_each(pos, &rk_domain->iommus) { > struct rk_iommu *iommu; >+ int ret; >+ > iommu = list_entry(pos, struct rk_iommu, node); >- WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); >- rk_iommu_zap_lines(iommu, iova, size); >- clk_bulk_disable(iommu->num_clocks, iommu->clocks); >+ >+ /* Only zap TLBs of IOMMUs that are powered on. */ >+ ret = pm_runtime_get_if_in_use(iommu->dev); >+ if (ret > 0 || ret == -EINVAL) { >+ WARN_ON(clk_bulk_enable(iommu->num_clocks, >+ iommu->clocks)); >+ rk_iommu_zap_lines(iommu, iova, size); >+ clk_bulk_disable(iommu->num_clocks, iommu->clocks); >+ } >+ if (ret > 0) >+ pm_runtime_put(iommu->dev); > } > spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); > } >@@ -817,22 +837,30 @@ static struct rk_iommu *rk_iommu_from_dev(struct device *dev) > return data ? 
data->iommu : NULL; > } > >-static int rk_iommu_attach_device(struct iommu_domain *domain, >- struct device *dev) >+/* Must be called with iommu powered on and attached */ >+static void rk_iommu_shutdown(struct rk_iommu *iommu) > { >- struct rk_iommu *iommu; >+ int i; >+ >+ /* Ignore error while disabling, just keep going */ >+ WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); >+ rk_iommu_enable_stall(iommu); >+ rk_iommu_disable_paging(iommu); >+ for (i = 0; i < iommu->num_mmu; i++) { >+ rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0); >+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0); >+ } >+ rk_iommu_disable_stall(iommu); >+ clk_bulk_disable(iommu->num_clocks, iommu->clocks); >+} >+ >+/* Must be called with iommu powered on and attached */ >+static int rk_iommu_startup(struct rk_iommu *iommu) >+{ >+ struct iommu_domain *domain = iommu->domain; > struct rk_iommu_domain *rk_domain = to_rk_domain(domain); >- unsigned long flags; > int ret, i; > >- /* >-* Allow 'virtual devices' (e.g., drm) to attach to domain. >-* Such a device does not belong to an iommu group. >-*/ >- iommu = rk_iommu_from_dev(dev); >- if (!iommu) >- return 0; >- > ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks); >
Re: [RESEND PATCH v6 13/14] iommu/rockchip: Add runtime PM support
On Mon, Mar 5, 2018 at 11:13 PM, Robin Murphy wrote: > On 05/03/18 13:49, Tomasz Figa wrote: > [...] >>> >>> @@ -518,7 +520,12 @@ static irqreturn_t rk_iommu_irq(int irq, void >>> *dev_id) >>> u32 int_status; >>> dma_addr_t iova; >>> irqreturn_t ret = IRQ_NONE; >>> - int i; >>> + int i, err, need_runtime_put; >> >> >> nit: need_runtime_put could be a bool. >> >>> + >>> + err = pm_runtime_get_if_in_use(iommu->dev); >>> + if (err <= 0 && err != -EINVAL) >>> + return ret; >>> + need_runtime_put = err > 0; >> >> >> Generally something must be really wrong if we end up with err == 0 >> here, because the IOMMU must be powered on to signal an interrupt. The >> only case this could happen would be if the IRQ signal was shared with >> some device from another power domain. Is it possible on Rockchip >> SoCs? If not, perhaps we should have a WARN_ON() here for such case. > > > In general, there's almost certainly some time window between the interrupt > level being latched at the GIC and the IRQ actually being taken by its > target CPU, in which potentially the power could be removed and/or the > clocks gated - especially if there are higher-priority IRQs pending at the > same time and the racing PM call is on some other CPU. Sure, it's probably > unlikely, but I wouldn't necessarily consider it completely erroneous. Clocks are not a problem here, since the handler re-enables them and clk_enable() is IRQ-safe. However, runtime PM might need sleeping, so we can't just get_sync() from the handler. I guess, we should just bail out in such case, since the power off would probably clear any internal interrupt state anyway. Also, the interrupt would be basically a page fault, during which the master device would be stalled, so it's rather unlikely that we see its driver putting the runtime PM, which would only happen after the master device resumes and completes (or something times out). So probably WARN_ON() isn't such a bad idea still. 
___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
Re: [RESEND PATCH v6 13/14] iommu/rockchip: Add runtime PM support
On 05/03/18 13:49, Tomasz Figa wrote: [...] @@ -518,7 +520,12 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) u32 int_status; dma_addr_t iova; irqreturn_t ret = IRQ_NONE; - int i; + int i, err, need_runtime_put; nit: need_runtime_put could be a bool. + + err = pm_runtime_get_if_in_use(iommu->dev); + if (err <= 0 && err != -EINVAL) + return ret; + need_runtime_put = err > 0; Generally something must be really wrong if we end up with err == 0 here, because the IOMMU must be powered on to signal an interrupt. The only case this could happen would be if the IRQ signal was shared with some device from another power domain. Is it possible on Rockchip SoCs? If not, perhaps we should have a WARN_ON() here for such case. In general, there's almost certainly some time window between the interrupt level being latched at the GIC and the IRQ actually being taken by its target CPU, in which potentially the power could be removed and/or the clocks gated - especially if there are higher-priority IRQs pending at the same time and the racing PM call is on some other CPU. Sure, it's probably unlikely, but I wouldn't necessarily consider it completely erroneous. Robin. ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
Re: [RESEND PATCH v6 13/14] iommu/rockchip: Add runtime PM support
Hi Jeffy, On Thu, Mar 1, 2018 at 7:18 PM, Jeffy Chen wrote: > When the power domain is powered off, the IOMMU cannot be accessed and > register programming must be deferred until the power domain becomes > enabled. > > Add runtime PM support, and use runtime PM device link from IOMMU to > master to startup and shutdown IOMMU. > > Signed-off-by: Jeffy Chen > --- > > Changes in v6: None > Changes in v5: > Avoid race about pm_runtime_get_if_in_use() and pm_runtime_enabled(). > > Changes in v4: None > Changes in v3: > Only call startup() and shutdown() when iommu attached. > Remove pm_mutex. > Check runtime PM disabled. > Check pm_runtime in rk_iommu_irq(). > > Changes in v2: None > > drivers/iommu/rockchip-iommu.c | 181 > +++-- > 1 file changed, 140 insertions(+), 41 deletions(-) > > diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c > index 2448a0528e39..0e0a42f41818 100644 > --- a/drivers/iommu/rockchip-iommu.c > +++ b/drivers/iommu/rockchip-iommu.c > @@ -22,6 +22,7 @@ > #include > #include > #include > +#include > #include > #include > > @@ -106,6 +107,7 @@ struct rk_iommu { > }; > > struct rk_iommudata { > + struct device_link *link; /* runtime PM link from IOMMU to master */ Kerneldoc comment for the struct could be added instead. > struct rk_iommu *iommu; > }; > > @@ -518,7 +520,12 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) > u32 int_status; > dma_addr_t iova; > irqreturn_t ret = IRQ_NONE; > - int i; > + int i, err, need_runtime_put; nit: need_runtime_put could be a bool. > + > + err = pm_runtime_get_if_in_use(iommu->dev); > + if (err <= 0 && err != -EINVAL) > + return ret; > + need_runtime_put = err > 0; Generally something must be really wrong if we end up with err == 0 here, because the IOMMU must be powered on to signal an interrupt. The only case this could happen would be if the IRQ signal was shared with some device from another power domain. Is it possible on Rockchip SoCs? 
If not, perhaps we should have a WARN_ON() here for such case. > > WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); > > @@ -570,6 +577,9 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) > > clk_bulk_disable(iommu->num_clocks, iommu->clocks); > > + if (need_runtime_put) > + pm_runtime_put(iommu->dev); > + > return ret; > } > > @@ -611,10 +621,20 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain > *rk_domain, > spin_lock_irqsave(&rk_domain->iommus_lock, flags); > list_for_each(pos, &rk_domain->iommus) { > struct rk_iommu *iommu; > + int ret; > + > iommu = list_entry(pos, struct rk_iommu, node); > - WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); > - rk_iommu_zap_lines(iommu, iova, size); > - clk_bulk_disable(iommu->num_clocks, iommu->clocks); > + > + /* Only zap TLBs of IOMMUs that are powered on. */ > + ret = pm_runtime_get_if_in_use(iommu->dev); > + if (ret > 0 || ret == -EINVAL) { > + WARN_ON(clk_bulk_enable(iommu->num_clocks, > + iommu->clocks)); > + rk_iommu_zap_lines(iommu, iova, size); > + clk_bulk_disable(iommu->num_clocks, iommu->clocks); > + } > + if (ret > 0) > + pm_runtime_put(iommu->dev); > } > spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); > } > @@ -817,22 +837,30 @@ static struct rk_iommu *rk_iommu_from_dev(struct device > *dev) > return data ? 
data->iommu : NULL; > } > > -static int rk_iommu_attach_device(struct iommu_domain *domain, > - struct device *dev) > +/* Must be called with iommu powered on and attached */ > +static void rk_iommu_shutdown(struct rk_iommu *iommu) > { > - struct rk_iommu *iommu; > + int i; > + > + /* Ignore error while disabling, just keep going */ > + WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); > + rk_iommu_enable_stall(iommu); > + rk_iommu_disable_paging(iommu); > + for (i = 0; i < iommu->num_mmu; i++) { > + rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0); > + rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0); > + } > + rk_iommu_disable_stall(iommu); > + clk_bulk_disable(iommu->num_clocks, iommu->clocks); > +} > + > +/* Must be called with iommu powered on and attached */ > +static int rk_iommu_startup(struct rk_iommu *iommu) > +{ > + struct iommu_domain *domain = iommu->domain; > struct rk_iommu_domain *rk_domain = to_rk_domain(domain); > - unsigned long flags; > int ret, i; > > - /* > -* Allow 'virtual
[RESEND PATCH v6 13/14] iommu/rockchip: Add runtime PM support
When the power domain is powered off, the IOMMU cannot be accessed and register programming must be deferred until the power domain becomes enabled. Add runtime PM support, and use runtime PM device link from IOMMU to master to startup and shutdown IOMMU. Signed-off-by: Jeffy Chen --- Changes in v6: None Changes in v5: Avoid race about pm_runtime_get_if_in_use() and pm_runtime_enabled(). Changes in v4: None Changes in v3: Only call startup() and shutdown() when iommu attached. Remove pm_mutex. Check runtime PM disabled. Check pm_runtime in rk_iommu_irq(). Changes in v2: None drivers/iommu/rockchip-iommu.c | 181 +++-- 1 file changed, 140 insertions(+), 41 deletions(-) diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 2448a0528e39..0e0a42f41818 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -106,6 +107,7 @@ struct rk_iommu { }; struct rk_iommudata { + struct device_link *link; /* runtime PM link from IOMMU to master */ struct rk_iommu *iommu; }; @@ -518,7 +520,12 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) u32 int_status; dma_addr_t iova; irqreturn_t ret = IRQ_NONE; - int i; + int i, err, need_runtime_put; + + err = pm_runtime_get_if_in_use(iommu->dev); + if (err <= 0 && err != -EINVAL) + return ret; + need_runtime_put = err > 0; WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); @@ -570,6 +577,9 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) clk_bulk_disable(iommu->num_clocks, iommu->clocks); + if (need_runtime_put) + pm_runtime_put(iommu->dev); + return ret; } @@ -611,10 +621,20 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, spin_lock_irqsave(&rk_domain->iommus_lock, flags); list_for_each(pos, &rk_domain->iommus) { struct rk_iommu *iommu; + int ret; + iommu = list_entry(pos, struct rk_iommu, node); - WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); - 
rk_iommu_zap_lines(iommu, iova, size); - clk_bulk_disable(iommu->num_clocks, iommu->clocks); + + /* Only zap TLBs of IOMMUs that are powered on. */ + ret = pm_runtime_get_if_in_use(iommu->dev); + if (ret > 0 || ret == -EINVAL) { + WARN_ON(clk_bulk_enable(iommu->num_clocks, + iommu->clocks)); + rk_iommu_zap_lines(iommu, iova, size); + clk_bulk_disable(iommu->num_clocks, iommu->clocks); + } + if (ret > 0) + pm_runtime_put(iommu->dev); } spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); } @@ -817,22 +837,30 @@ static struct rk_iommu *rk_iommu_from_dev(struct device *dev) return data ? data->iommu : NULL; } -static int rk_iommu_attach_device(struct iommu_domain *domain, - struct device *dev) +/* Must be called with iommu powered on and attached */ +static void rk_iommu_shutdown(struct rk_iommu *iommu) { - struct rk_iommu *iommu; + int i; + + /* Ignore error while disabling, just keep going */ + WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); + rk_iommu_enable_stall(iommu); + rk_iommu_disable_paging(iommu); + for (i = 0; i < iommu->num_mmu; i++) { + rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0); + rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0); + } + rk_iommu_disable_stall(iommu); + clk_bulk_disable(iommu->num_clocks, iommu->clocks); +} + +/* Must be called with iommu powered on and attached */ +static int rk_iommu_startup(struct rk_iommu *iommu) +{ + struct iommu_domain *domain = iommu->domain; struct rk_iommu_domain *rk_domain = to_rk_domain(domain); - unsigned long flags; int ret, i; - /* -* Allow 'virtual devices' (e.g., drm) to attach to domain. -* Such a device does not belong to an iommu group. 
-*/ - iommu = rk_iommu_from_dev(dev); - if (!iommu) - return 0; - ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks); if (ret) return ret; @@ -845,8 +873,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, if (ret) goto out_disable_stall; - iommu->domain = domain; - for (i = 0; i < iommu->num_mmu; i++) { rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, rk_domain->dt_dma); @@ -855,14 +881,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,