Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On Mon, Aug 05, 2019 at 10:24:42PM +0530, Vidya Sagar wrote:

[...]

> > > > IRQs are enabled when you call a suspend_noirq() callback, so the
> > > > blocking API can be used as long as the IRQ descriptor backing
> > > > the IRQ that will wake-up the blocked call is marked as
> > > > IRQF_NO_SUSPEND.
> > > >
> > > > The problem is not IRQs enabled/disabled at CPU level, the problem is
> > > > the IRQ descriptor of the IRQ required to handle the blocking BPMP call,
> > > > mark it as IRQF_NO_SUSPEND and remove the tegra_bpmp_transfer_atomic()
> > > > call from this code (or please give me a concrete example pinpointing
> > > > why it is needed).
> > > Ideally, using tegra_bpmp_transfer() alone in all paths (.probe() as
> > > well as .resume_noirq()) should have worked as the corresponding IRQ
> > > is already flagged as IRQF_NO_SUSPEND, but, because of the way the
> > > BPMP-FW driver in the kernel makes its interface available through
> > > .resume_early(), tegra_bpmp_transfer() wasn't working as expected and
> > > I pushed a patch (CC'ing you) at
> > > http://patchwork.ozlabs.org/patch/1140973/ to make it .resume_noirq()
> > > from .resume_early(). With that in place, we can just use
> > > tegra_bpmp_transfer(). I'll push a new patch with this change once my
> > > BPMP-FW driver patch is approved.
> >
> > Does this leave you with a resume_noirq() callbacks ordering issue to
> > sort out ?
>
> Not really.
>
> > a.k.a How will you guarantee that the BPMP will resume before the host
> > bridge ?
>
> It is already taken care of in the following way. PCIe controller's
> device-tree node has an entry with a phandle of BPMP-FW's node to get
> a handle of it and PCIe driver uses tegra_bpmp_get() API for that.
> This API returns -EPROBE_DEFER if BPMP-FW's driver is not ready yet,
> which guarantees that PCIe driver gets loaded only after BPMP-FW's
> driver and this order is followed during noirq phase also.

OK, thanks, this makes much more sense than the original code.

Lorenzo

> > Thanks,
> > Lorenzo
> > > Thanks,
> > > Vidya Sagar
> > > > Thanks,
> > > > Lorenzo
> > > > > I'll go ahead and make next patch series with this if this looks fine
> > > > > to you.
> > > > > > > > Actually, if tegra_bpmp_transfer() requires IRQs to be enabled
> > > > > > > > you may even end up in a situation where that blocking call
> > > > > > > > does not wake up because the IRQ in question was disabled in
> > > > > > > > the NOIRQ suspend/resume phase.
> > > > > > > >
> > > > > > > > [...]
> > > > > > > > > > > > > > > > > > > +static int tegra_pcie_dw_probe(struct platform_device > > > > > > > > > > > *pdev) > > > > > > > > > > > +{ > > > > > > > > > > > + const struct tegra_pcie_soc *data; > > > > > > > > > > > + struct device *dev = &pdev->dev; > > > > > > > > > > > + struct resource *atu_dma_res; > > > > > > > > > > > + struct tegra_pcie_dw *pcie; > > > > > > > > > > > + struct resource *dbi_res; > > > > > > > > > > > + struct pcie_port *pp; > > > > > > > > > > > + struct dw_pcie *pci; > > > > > > > > > > > + struct phy **phys; > > > > > > > > > > > + char *name; > > > > > > > > > > > + int ret; > > > > > > > > > > > + u32 i; > > > > > > > > > > > + > > > > > > > > > > > + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); > > > > > > > > > > > + if (!pcie) > > > > > > > > > > > + return -ENOMEM; > > > > > > > > > > > + > > > > > > > > > > > + pci = &pcie->pci; > > > > > > > > > > > + pci->dev = &pdev->dev; > > > > > > > > > > > + pci->ops = &tegra_dw_pcie_ops; > > > > > > > > > > > + pp = &pci->pp; > > > > > > > > > > > + pcie->dev = &pdev->dev; > > > > > > > > > > > + > > > > > > > > > > > + data = (struct tegra_pcie_soc > > > > > > > > > > > *)of_device_get_match_data(dev); > > > > > > > > > > > + if (!data) > > > > > > > > > > > + return -EINVAL; > > > > > > > > > > > + pcie->mode = (enum dw_pcie_device_mode)data->mode; > > > > > > > > > > > + > > > > > > > > > > > + ret = tegra_pcie_dw_parse_dt(pcie); > > > > > > > > > > > + if (ret < 0) { > > > > > > > > > > > + dev_err(dev, "Failed to parse device tree: > > > > > > > > > > > %d\n", ret); > > > > > > > > > > > + return ret; > > > > > > > > > > > + } > > > > > > > > > > > + > > > > > > > > > > > + pcie->pex_ctl_supply = devm_regulator_get(dev, > > > > > > > > > > > "vddio-pex-ctl"); > > > > > > > > > > > + if (IS_ERR(pcie->pex_ctl_supply)) { > > > > > > > > > > > + dev_err(dev, "Failed to get regulator: %ld\n", > > > > > > > > > > > + PTR_ERR(pcie->pex_ctl_supply)); > > > > > > > > > > > + return PTR_ERR(pcie->pex_ctl_supply); > > > > > > > > > > > + } > > > > > > > > > > > + > > > > > > > > > > > + pcie->core_clk = devm_clk_get(dev, "core"); > > > > > > > > > > > + if (IS_ERR(pcie->core_clk)) { > > > > > > > > > > > + dev_err(dev, "Failed to get core clock: %ld\n", > > > > > > > > > > > +
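The ordering guarantee described above rests on the kernel's standard deferred-probe pattern. A minimal sketch, assuming the tegra_bpmp_get() API from soc/tegra/bpmp.h (the helper name is hypothetical, not the literal driver code):

#include <soc/tegra/bpmp.h>

/* Sketch: acquire the BPMP handle at probe time. If the BPMP
 * firmware driver has not probed yet, tegra_bpmp_get() returns
 * ERR_PTR(-EPROBE_DEFER), so the PCIe probe is retried after the
 * BPMP driver is ready; device PM ordering then also resumes the
 * BPMP before the host bridge in the noirq phase. */
static int tegra_pcie_get_bpmp(struct tegra_pcie_dw *pcie)
{
	pcie->bpmp = tegra_bpmp_get(pcie->dev);
	if (IS_ERR(pcie->bpmp))
		return PTR_ERR(pcie->bpmp);

	return 0;
}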
Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On 8/5/2019 7:31 PM, Lorenzo Pieralisi wrote:

On Fri, Aug 02, 2019 at 05:36:43PM +0530, Vidya Sagar wrote:

On 7/30/2019 9:19 PM, Lorenzo Pieralisi wrote:

On Tue, Jul 23, 2019 at 08:14:08PM +0530, Vidya Sagar wrote:

On 7/16/2019 4:52 PM, Lorenzo Pieralisi wrote:

On Sat, Jul 13, 2019 at 12:34:34PM +0530, Vidya Sagar wrote:

[...]

+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+					   bool enable)
+{
+	struct mrq_uphy_response resp;
+	struct tegra_bpmp_message msg;
+	struct mrq_uphy_request req;
+	int err;
+
+	if (pcie->cid == 5)
+		return 0;

What's wrong with cid == 5 ? Explain please.

Controller with ID=5 doesn't need any programming to enable it which is
done here through calling firmware API.

+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
+	req.controller_state.pcie_controller = pcie->cid;
+	req.controller_state.enable = enable;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_UPHY;
+	msg.tx.data = &req;
+	msg.tx.size = sizeof(req);
+	msg.rx.data = &resp;
+	msg.rx.size = sizeof(resp);
+
+	if (irqs_disabled())

Can you explain to me what this check is meant to achieve please ?

Firmware interface provides different APIs to be called when there are
no interrupts enabled in the system (noirq context) and otherwise hence
checking that situation here and calling appropriate API.

That's what I am questioning. Being called from {suspend/resume}_noirq()
callbacks (if that's the code path this check caters for) does not mean
irqs_disabled() == true.

Agree. Actually, I got a hint of having this check from the following.
Both tegra_bpmp_transfer_atomic() and tegra_bpmp_transfer() are
indirectly called by APIs registered with .master_xfer() and
.master_xfer_atomic() hooks of struct i2c_algorithm and the decision to
call which one of these is made using the following check in i2c-core.h
file.

static inline bool i2c_in_atomic_xfer_mode(void)
{
	return system_state > SYSTEM_RUNNING && irqs_disabled();
}

I think I should use this condition as is IIUC.
Please let me know if there are any concerns with this.

It is not a concern, it is just that I don't understand how this code
can be called with IRQs disabled, if you can give me an execution path I
am happy to leave the check there. On top of that, when called from
suspend NOIRQ context, it is likely to use the blocking API (because
IRQs aren't disabled at CPU level) behind which there is most certainly
an IRQ required to wake the thread up and if the IRQ in question was
disabled in the suspend NOIRQ phase this code is likely to deadlock.

I want to make sure we can justify adding this check, I do not want to
add it because we think it can be needed when it may not be needed at
all (and it gets copy and pasted over and over again in other drivers).

I had a discussion internally about this and the prescribed usage of
these APIs seem to be that use tegra_bpmp_transfer() in .probe() and
other paths where interrupts are enabled as this API needs interrupts
to be enabled for its working. Use tegra_bpmp_transfer_atomic()
surrounded by local_irq_save()/local_irq_restore() in other paths where
interrupt servicing is disabled.

Why tegra_bpmp_transfer_atomic() needs IRQs to be disabled ? And why is
it needed in this piece of code where IRQs are _never_ disabled at CPU
level ?

IRQs are enabled when you call a suspend_noirq() callback, so the
blocking API can be used as long as the IRQ descriptor backing the IRQ
that will wake-up the blocked call is marked as IRQF_NO_SUSPEND.

The problem is not IRQs enabled/disabled at CPU level, the problem is
the IRQ descriptor of the IRQ required to handle the blocking BPMP call,
mark it as IRQF_NO_SUSPEND and remove the tegra_bpmp_transfer_atomic()
call from this code (or please give me a concrete example pinpointing
why it is needed).

Ideally, using tegra_bpmp_transfer() alone in all paths (.probe() as
well as .resume_noirq()) should have worked as the corresponding IRQ is
already flagged as IRQF_NO_SUSPEND, but, because of the way the BPMP-FW
driver in the kernel makes its interface available through
.resume_early(), tegra_bpmp_transfer() wasn't working as expected and I
pushed a patch (CC'ing you) at
http://patchwork.ozlabs.org/patch/1140973/ to make it .resume_noirq()
from .resume_early(). With that in place, we can just use
tegra_bpmp_transfer(). I'll push a new patch with this change once my
BPMP-FW driver patch is approved.

Does this leave you with a resume_noirq() callbacks ordering issue to
sort out ?

Not really.

a.k.a How will you guarantee that the BPMP will resume before the host
bridge ?

It is already taken care of in the following way. PCIe controller's
device-tree node has an entry with a phandle of BPMP-FW's node to get a
handle of it and PCIe driver uses tegra_bpmp_get() API for that. This
API returns -EPROBE_DEFER if BPMP-FW's driver is not ready yet, which
guarantees that PCIe driver gets loaded only after BPMP-FW's driver and
this order is followed during noirq phase also.
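The fix referenced at the patchwork link above amounts to moving the BPMP driver's resume callback from the early phase to the noirq phase, roughly this shape (a sketch, not the actual patch; the callback name is assumed):

/* Sketch: resume the BPMP interface in the noirq phase so that other
 * drivers' .resume_noirq() callbacks can already rely on
 * tegra_bpmp_transfer(). */
static const struct dev_pm_ops tegra_bpmp_pm_ops = {
	.resume_noirq = tegra_bpmp_resume,	/* was .resume_early */
};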
Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On Fri, Aug 02, 2019 at 05:36:43PM +0530, Vidya Sagar wrote: > On 7/30/2019 9:19 PM, Lorenzo Pieralisi wrote: > > On Tue, Jul 23, 2019 at 08:14:08PM +0530, Vidya Sagar wrote: > > > On 7/16/2019 4:52 PM, Lorenzo Pieralisi wrote: > > > > On Sat, Jul 13, 2019 at 12:34:34PM +0530, Vidya Sagar wrote: > > > > > > > > [...] > > > > > > > > > > > > > +static int tegra_pcie_bpmp_set_ctrl_state(struct > > > > > > > > > tegra_pcie_dw *pcie, > > > > > > > > > + bool enable) > > > > > > > > > +{ > > > > > > > > > + struct mrq_uphy_response resp; > > > > > > > > > + struct tegra_bpmp_message msg; > > > > > > > > > + struct mrq_uphy_request req; > > > > > > > > > + int err; > > > > > > > > > + > > > > > > > > > + if (pcie->cid == 5) > > > > > > > > > + return 0; > > > > > > > > > > > > > > > > What's wrong with cid == 5 ? Explain please. > > > > > > > Controller with ID=5 doesn't need any programming to enable it > > > > > > > which is > > > > > > > done here through calling firmware API. > > > > > > > > > > > > > > > > > > > > > > > > + memset(&req, 0, sizeof(req)); > > > > > > > > > + memset(&resp, 0, sizeof(resp)); > > > > > > > > > + > > > > > > > > > + req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE; > > > > > > > > > + req.controller_state.pcie_controller = pcie->cid; > > > > > > > > > + req.controller_state.enable = enable; > > > > > > > > > + > > > > > > > > > + memset(&msg, 0, sizeof(msg)); > > > > > > > > > + msg.mrq = MRQ_UPHY; > > > > > > > > > + msg.tx.data = &req; > > > > > > > > > + msg.tx.size = sizeof(req); > > > > > > > > > + msg.rx.data = &resp; > > > > > > > > > + msg.rx.size = sizeof(resp); > > > > > > > > > + > > > > > > > > > + if (irqs_disabled()) > > > > > > > > > > > > > > > > Can you explain to me what this check is meant to achieve > > > > > > > > please ? > > > > > > > Firmware interface provides different APIs to be called when > > > > > > > there are > > > > > > > no interrupts enabled in the system (noirq context) and otherwise > > > > > > > hence checking that situation here and calling appropriate API. > > > > > > > > > > > > That's what I am questioning. Being called from > > > > > > {suspend/resume}_noirq() > > > > > > callbacks (if that's the code path this check caters for) does not > > > > > > mean > > > > > > irqs_disabled() == true. > > > > > Agree. > > > > > Actually, I got a hint of having this check from the following. > > > > > Both tegra_bpmp_transfer_atomic() and tegra_bpmp_transfer() are > > > > > indirectly > > > > > called by APIs registered with .master_xfer() and > > > > > .master_xfer_atomic() hooks of > > > > > struct i2c_algorithm and the decision to call which one of these is > > > > > made using the > > > > > following check in i2c-core.h file. > > > > > static inline bool i2c_in_atomic_xfer_mode(void) > > > > > { > > > > > return system_state > SYSTEM_RUNNING && irqs_disabled(); > > > > > } > > > > > I think I should use this condition as is IIUC. > > > > > Please let me know if there are any concerns with this. > > > > > > > > It is not a concern, it is just that I don't understand how this code > > > > can be called with IRQs disabled, if you can give me an execution path I > > > > am happy to leave the check there. 
On top of that, when called from > > > > suspend NOIRQ context, it is likely to use the blocking API (because > > > > IRQs aren't disabled at CPU level) behind which there is most certainly > > > > an IRQ required to wake the thread up and if the IRQ in question was > > > > disabled in the suspend NOIRQ phase this code is likely to deadlock. > > > > > > > > I want to make sure we can justify adding this check, I do not > > > > want to add it because we think it can be needed when it may not > > > > be needed at all (and it gets copy and pasted over and over again > > > > in other drivers). > > > I had a discussion internally about this and the prescribed usage of > > > these APIs > > > seem to be that > > > use tegra_bpmp_transfer() in .probe() and other paths where interrupts are > > > enabled as this API needs interrupts to be enabled for its working. > > > Use tegra_bpmp_transfer_atomic() surrounded by > > > local_irq_save()/local_irq_restore() > > > in other paths where interrupt servicing is disabled. > > > > Why tegra_bpmp_transfer_atomic() needs IRQs to be disabled ? And why > > is it needed in this piece of code where IRQs are _never_ disabled > > at CPU level ? > > > > IRQs are enabled when you call a suspend_noirq() callback, so the > > blocking API can be used as long as the IRQ descriptor backing > > the IRQ that will wake-up the blocked call is marked as > > IRQF_NO_SUSPEND. > > > > The problem is not IRQs enabled/disabled at CPU level, the problem is > > the IRQ descriptor of the IRQ required to handle the blocking BPMP call, > > mark it as IRQF_NO_SUSPEND and remove the tegra_bpmp_transfer_atomic()
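A minimal sketch of the IRQF_NO_SUSPEND marking Lorenzo prescribes, keeping the interrupt that completes a blocking BPMP transfer enabled across the noirq suspend/resume phase (the IRQ handler and names are hypothetical):

/* Sketch: the mailbox/doorbell IRQ that wakes up a blocked
 * tegra_bpmp_transfer() call must stay enabled during the noirq
 * phase, hence IRQF_NO_SUSPEND. */
err = devm_request_irq(&pdev->dev, irq, tegra_bpmp_isr,
		       IRQF_NO_SUSPEND, "tegra-bpmp", bpmp);
if (err < 0)
	return err;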
Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On 7/30/2019 9:19 PM, Lorenzo Pieralisi wrote:

On Tue, Jul 23, 2019 at 08:14:08PM +0530, Vidya Sagar wrote:

On 7/16/2019 4:52 PM, Lorenzo Pieralisi wrote:

On Sat, Jul 13, 2019 at 12:34:34PM +0530, Vidya Sagar wrote:

[...]

+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+					   bool enable)
+{
+	struct mrq_uphy_response resp;
+	struct tegra_bpmp_message msg;
+	struct mrq_uphy_request req;
+	int err;
+
+	if (pcie->cid == 5)
+		return 0;

What's wrong with cid == 5 ? Explain please.

Controller with ID=5 doesn't need any programming to enable it which is
done here through calling firmware API.

+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
+	req.controller_state.pcie_controller = pcie->cid;
+	req.controller_state.enable = enable;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_UPHY;
+	msg.tx.data = &req;
+	msg.tx.size = sizeof(req);
+	msg.rx.data = &resp;
+	msg.rx.size = sizeof(resp);
+
+	if (irqs_disabled())

Can you explain to me what this check is meant to achieve please ?

Firmware interface provides different APIs to be called when there are
no interrupts enabled in the system (noirq context) and otherwise hence
checking that situation here and calling appropriate API.

That's what I am questioning. Being called from {suspend/resume}_noirq()
callbacks (if that's the code path this check caters for) does not mean
irqs_disabled() == true.

Agree. Actually, I got a hint of having this check from the following.
Both tegra_bpmp_transfer_atomic() and tegra_bpmp_transfer() are
indirectly called by APIs registered with .master_xfer() and
.master_xfer_atomic() hooks of struct i2c_algorithm and the decision to
call which one of these is made using the following check in i2c-core.h
file.

static inline bool i2c_in_atomic_xfer_mode(void)
{
	return system_state > SYSTEM_RUNNING && irqs_disabled();
}

I think I should use this condition as is IIUC.
Please let me know if there are any concerns with this.

It is not a concern, it is just that I don't understand how this code
can be called with IRQs disabled, if you can give me an execution path I
am happy to leave the check there. On top of that, when called from
suspend NOIRQ context, it is likely to use the blocking API (because
IRQs aren't disabled at CPU level) behind which there is most certainly
an IRQ required to wake the thread up and if the IRQ in question was
disabled in the suspend NOIRQ phase this code is likely to deadlock.

I want to make sure we can justify adding this check, I do not want to
add it because we think it can be needed when it may not be needed at
all (and it gets copy and pasted over and over again in other drivers).

I had a discussion internally about this and the prescribed usage of
these APIs seem to be that use tegra_bpmp_transfer() in .probe() and
other paths where interrupts are enabled as this API needs interrupts
to be enabled for its working. Use tegra_bpmp_transfer_atomic()
surrounded by local_irq_save()/local_irq_restore() in other paths where
interrupt servicing is disabled.

Why tegra_bpmp_transfer_atomic() needs IRQs to be disabled ? And why is
it needed in this piece of code where IRQs are _never_ disabled at CPU
level ?

IRQs are enabled when you call a suspend_noirq() callback, so the
blocking API can be used as long as the IRQ descriptor backing the IRQ
that will wake-up the blocked call is marked as IRQF_NO_SUSPEND.

The problem is not IRQs enabled/disabled at CPU level, the problem is
the IRQ descriptor of the IRQ required to handle the blocking BPMP call,
mark it as IRQF_NO_SUSPEND and remove the tegra_bpmp_transfer_atomic()
call from this code (or please give me a concrete example pinpointing
why it is needed).

Ideally, using tegra_bpmp_transfer() alone in all paths (.probe() as
well as .resume_noirq()) should have worked as the corresponding IRQ is
already flagged as IRQF_NO_SUSPEND, but, because of the way the BPMP-FW
driver in the kernel makes its interface available through
.resume_early(), tegra_bpmp_transfer() wasn't working as expected and I
pushed a patch (CC'ing you) at
http://patchwork.ozlabs.org/patch/1140973/ to make it .resume_noirq()
from .resume_early(). With that in place, we can just use
tegra_bpmp_transfer(). I'll push a new patch with this change once my
BPMP-FW driver patch is approved.

Thanks,
Vidya Sagar

Thanks,
Lorenzo

I'll go ahead and make next patch series with this if this looks fine
to you.

Actually, if tegra_bpmp_transfer() requires IRQs to be enabled you may
even end up in a situation where that blocking call does not wake up
because the IRQ in question was disabled in the NOIRQ suspend/resume
phase.

[...]

+static int tegra_pcie_dw_probe(struct platform_device *pdev)
+{
+	const struct tegra_pcie_soc *data;
+
Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On Tue, Jul 23, 2019 at 08:14:08PM +0530, Vidya Sagar wrote: > On 7/16/2019 4:52 PM, Lorenzo Pieralisi wrote: > > On Sat, Jul 13, 2019 at 12:34:34PM +0530, Vidya Sagar wrote: > > > > [...] > > > > > > > > > +static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw > > > > > > > *pcie, > > > > > > > + bool enable) > > > > > > > +{ > > > > > > > + struct mrq_uphy_response resp; > > > > > > > + struct tegra_bpmp_message msg; > > > > > > > + struct mrq_uphy_request req; > > > > > > > + int err; > > > > > > > + > > > > > > > + if (pcie->cid == 5) > > > > > > > + return 0; > > > > > > > > > > > > What's wrong with cid == 5 ? Explain please. > > > > > Controller with ID=5 doesn't need any programming to enable it which > > > > > is > > > > > done here through calling firmware API. > > > > > > > > > > > > > > > > > > + memset(&req, 0, sizeof(req)); > > > > > > > + memset(&resp, 0, sizeof(resp)); > > > > > > > + > > > > > > > + req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE; > > > > > > > + req.controller_state.pcie_controller = pcie->cid; > > > > > > > + req.controller_state.enable = enable; > > > > > > > + > > > > > > > + memset(&msg, 0, sizeof(msg)); > > > > > > > + msg.mrq = MRQ_UPHY; > > > > > > > + msg.tx.data = &req; > > > > > > > + msg.tx.size = sizeof(req); > > > > > > > + msg.rx.data = &resp; > > > > > > > + msg.rx.size = sizeof(resp); > > > > > > > + > > > > > > > + if (irqs_disabled()) > > > > > > > > > > > > Can you explain to me what this check is meant to achieve please ? > > > > > Firmware interface provides different APIs to be called when there are > > > > > no interrupts enabled in the system (noirq context) and otherwise > > > > > hence checking that situation here and calling appropriate API. > > > > > > > > That's what I am questioning. Being called from {suspend/resume}_noirq() > > > > callbacks (if that's the code path this check caters for) does not mean > > > > irqs_disabled() == true. > > > Agree. > > > Actually, I got a hint of having this check from the following. > > > Both tegra_bpmp_transfer_atomic() and tegra_bpmp_transfer() are indirectly > > > called by APIs registered with .master_xfer() and .master_xfer_atomic() > > > hooks of > > > struct i2c_algorithm and the decision to call which one of these is made > > > using the > > > following check in i2c-core.h file. > > > static inline bool i2c_in_atomic_xfer_mode(void) > > > { > > > return system_state > SYSTEM_RUNNING && irqs_disabled(); > > > } > > > I think I should use this condition as is IIUC. > > > Please let me know if there are any concerns with this. > > > > It is not a concern, it is just that I don't understand how this code > > can be called with IRQs disabled, if you can give me an execution path I > > am happy to leave the check there. On top of that, when called from > > suspend NOIRQ context, it is likely to use the blocking API (because > > IRQs aren't disabled at CPU level) behind which there is most certainly > > an IRQ required to wake the thread up and if the IRQ in question was > > disabled in the suspend NOIRQ phase this code is likely to deadlock. > > > > I want to make sure we can justify adding this check, I do not > > want to add it because we think it can be needed when it may not > > be needed at all (and it gets copy and pasted over and over again > > in other drivers). 
> I had a discussion internally about this and the prescribed usage of these > APIs > seem to be that > use tegra_bpmp_transfer() in .probe() and other paths where interrupts are > enabled as this API needs interrupts to be enabled for its working. > Use tegra_bpmp_transfer_atomic() surrounded by > local_irq_save()/local_irq_restore() > in other paths where interrupt servicing is disabled. Why tegra_bpmp_transfer_atomic() needs IRQs to be disabled ? And why is it needed in this piece of code where IRQs are _never_ disabled at CPU level ? IRQs are enabled when you call a suspend_noirq() callback, so the blocking API can be used as long as the IRQ descriptor backing the IRQ that will wake-up the blocked call is marked as IRQF_NO_SUSPEND. The problem is not IRQs enabled/disabled at CPU level, the problem is the IRQ descriptor of the IRQ required to handle the blocking BPMP call, mark it as IRQF_NO_SUSPEND and remove the tegra_bpmp_transfer_atomic() call from this code (or please give me a concrete example pinpointing why it is needed). Thanks, Lorenzo > I'll go ahead and make next patch series with this if this looks fine to you. > > > > > > > Actually, if tegra_bpmp_transfer() requires IRQs to be enabled you may > > > > even end up in a situation where that blocking call does not wake up > > > > because the IRQ in question was disabled in the NOIRQ suspend/resume > > > > phase. > > > > > > > > [...] > > > > > > > > > > > +static int tegra_pcie_dw_probe(struct platform_device *pdev) > > > > > > > +{ > > > > > > > + const struct tegra_pcie_soc *data; > > > > > > > + struct
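Spelled out, the i2c-core.h-style check under discussion would shape the transfer path like this (a sketch of the proposal; the question above is precisely whether this condition can ever be true on this path):

/* Sketch: mirror i2c_in_atomic_xfer_mode() and pick the atomic API
 * only during late system transitions with hard IRQs off. */
if (system_state > SYSTEM_RUNNING && irqs_disabled())
	err = tegra_bpmp_transfer_atomic(pcie->bpmp, &msg);
else
	err = tegra_bpmp_transfer(pcie->bpmp, &msg);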
Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On 7/16/2019 4:52 PM, Lorenzo Pieralisi wrote:

On Sat, Jul 13, 2019 at 12:34:34PM +0530, Vidya Sagar wrote:

[...]

+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+					   bool enable)
+{
+	struct mrq_uphy_response resp;
+	struct tegra_bpmp_message msg;
+	struct mrq_uphy_request req;
+	int err;
+
+	if (pcie->cid == 5)
+		return 0;

What's wrong with cid == 5 ? Explain please.

Controller with ID=5 doesn't need any programming to enable it which is
done here through calling firmware API.

+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
+	req.controller_state.pcie_controller = pcie->cid;
+	req.controller_state.enable = enable;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_UPHY;
+	msg.tx.data = &req;
+	msg.tx.size = sizeof(req);
+	msg.rx.data = &resp;
+	msg.rx.size = sizeof(resp);
+
+	if (irqs_disabled())

Can you explain to me what this check is meant to achieve please ?

Firmware interface provides different APIs to be called when there are
no interrupts enabled in the system (noirq context) and otherwise hence
checking that situation here and calling appropriate API.

That's what I am questioning. Being called from {suspend/resume}_noirq()
callbacks (if that's the code path this check caters for) does not mean
irqs_disabled() == true.

Agree. Actually, I got a hint of having this check from the following.
Both tegra_bpmp_transfer_atomic() and tegra_bpmp_transfer() are
indirectly called by APIs registered with .master_xfer() and
.master_xfer_atomic() hooks of struct i2c_algorithm and the decision to
call which one of these is made using the following check in i2c-core.h
file.

static inline bool i2c_in_atomic_xfer_mode(void)
{
	return system_state > SYSTEM_RUNNING && irqs_disabled();
}

I think I should use this condition as is IIUC.
Please let me know if there are any concerns with this.

It is not a concern, it is just that I don't understand how this code
can be called with IRQs disabled, if you can give me an execution path I
am happy to leave the check there. On top of that, when called from
suspend NOIRQ context, it is likely to use the blocking API (because
IRQs aren't disabled at CPU level) behind which there is most certainly
an IRQ required to wake the thread up and if the IRQ in question was
disabled in the suspend NOIRQ phase this code is likely to deadlock.

I want to make sure we can justify adding this check, I do not want to
add it because we think it can be needed when it may not be needed at
all (and it gets copy and pasted over and over again in other drivers).

I had a discussion internally about this and the prescribed usage of
these APIs seem to be that use tegra_bpmp_transfer() in .probe() and
other paths where interrupts are enabled as this API needs interrupts
to be enabled for its working. Use tegra_bpmp_transfer_atomic()
surrounded by local_irq_save()/local_irq_restore() in other paths where
interrupt servicing is disabled.

I'll go ahead and make next patch series with this if this looks fine
to you.

Actually, if tegra_bpmp_transfer() requires IRQs to be enabled you may
even end up in a situation where that blocking call does not wake up
because the IRQ in question was disabled in the NOIRQ suspend/resume
phase.

[...]

+static int tegra_pcie_dw_probe(struct platform_device *pdev)
+{
+	const struct tegra_pcie_soc *data;
+	struct device *dev = &pdev->dev;
+	struct resource *atu_dma_res;
+	struct tegra_pcie_dw *pcie;
+	struct resource *dbi_res;
+	struct pcie_port *pp;
+	struct dw_pcie *pci;
+	struct phy **phys;
+	char *name;
+	int ret;
+	u32 i;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pci = &pcie->pci;
+	pci->dev = &pdev->dev;
+	pci->ops = &tegra_dw_pcie_ops;
+	pp = &pci->pp;
+	pcie->dev = &pdev->dev;
+
+	data = (struct tegra_pcie_soc *)of_device_get_match_data(dev);
+	if (!data)
+		return -EINVAL;
+	pcie->mode = (enum dw_pcie_device_mode)data->mode;
+
+	ret = tegra_pcie_dw_parse_dt(pcie);
+	if (ret < 0) {
+		dev_err(dev, "Failed to parse device tree: %d\n", ret);
+		return ret;
+	}
+
+	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
+	if (IS_ERR(pcie->pex_ctl_supply)) {
+		dev_err(dev, "Failed to get regulator: %ld\n",
+			PTR_ERR(pcie->pex_ctl_supply));
+		return PTR_ERR(pcie->pex_ctl_supply);
+	}
+
+	pcie->core_clk = devm_clk_get(dev, "core");
+	if (IS_ERR(pcie->core_clk)) {
+		dev_err(dev, "Failed to get core clock: %ld\n",
+			PTR_ERR(pcie->core_clk));
+
RE: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
-----Original Message-----
From: devicetree-ow...@vger.kernel.org On Behalf Of Bjorn Helgaas
Sent: Wednesday, July 17, 2019 12:30 AM
To: Lorenzo Pieralisi
Cc: Vidya Sagar ; robh...@kernel.org; mark.rutl...@arm.com;
thierry.red...@gmail.com; Jonathan Hunter ; kis...@ti.com;
catalin.mari...@arm.com; will.dea...@arm.com; jingooh...@gmail.com;
gustavo.pimen...@synopsys.com; dig...@gmail.com; Mikko Perttunen ;
linux-...@vger.kernel.org; devicet...@vger.kernel.org;
linux-te...@vger.kernel.org; linux-ker...@vger.kernel.org;
linux-arm-ker...@lists.infradead.org; Krishna Thota ; Manikanta
Maddireddy ; sagar...@gmail.com
Subject: Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support

On Tue, Jul 16, 2019 at 12:22:25PM +0100, Lorenzo Pieralisi wrote:
> On Sat, Jul 13, 2019 at 12:34:34PM +0530, Vidya Sagar wrote:
> > > > > So if the link is not up we still go ahead and make probe
> > > > > succeed. What for ?
> > > > We may need root port to be available to support hot-plugging of
> > > > endpoint devices, so, we don't fail the probe.
> > >
> > > We need it or we don't. If you do support hotplugging of endpoint
> > > devices point me at the code, otherwise link up failure means
> > > failure to probe.
> > Currently hotplugging of endpoint is not supported, but it is one of
> > the use cases that we may add support for in future.
>
> You should elaborate on this, I do not understand what you mean,
> either the root port(s) supports hotplug or it does not.
>
> > But, why should we fail probe if link up doesn't happen? As such,
> > nothing went wrong in terms of root port initialization right? I
> > checked other DWC based implementations and following are not
> > failing the probe pci-dra7xx.c, pcie-armada8k.c, pcie-artpec6.c,
> > pcie-histb.c, pcie-kirin.c, pcie-spear13xx.c, pci-exynos.c,
> > pci-imx6.c, pci-keystone.c, pci-layerscape.c
> >
> > Although following do fail the probe if link is not up.
> > pcie-qcom.c, pcie-uniphier.c, pci-meson.c
> >
> > So, to me, it looks more like a choice we can make whether to fail
> > the probe or not and in this case we are choosing not to fail.
>
> I disagree. I had an offline chat with Bjorn and whether link-up
> should fail the probe or not depends on whether the root port(s) is
> hotplug capable or not and this in turn relies on the root port "Slot
> implemented" bit in the PCI Express capabilities register.

There might be a little more we can talk about in this regard. I did
bring up the "Slot implemented" bit, but after thinking about it more,
I don't really think the host bridge driver should be looking at that.
That's a PCIe concept, and it's really *downstream* from the host
bridge itself. The host bridge is logically a device on the CPU bus,
not the PCI bus.

I'm starting to think that the host bridge driver probe should be
disconnected from question of whether the root port links are up.
Logically, the host bridge driver connects the CPU bus to a PCI root
bus, so it converts CPU-side accesses to PCI config, memory, or I/O
port transactions.

Given that, the PCI core can enumerate devices on the root bus and
downstream buses. Devices on the root bus typically include Root Ports,
but might also include endpoints, Root Complex Integrated Endpoints,
Root Complex Event Collectors, etc. I think in principle, we would want
the host bridge probe to succeed so we can use these devices even if
none of the Root Ports have a link.

If a Root Port is present, I think users will expect to see it in the
"lspci" output, even if its downstream link is not up. That will enable
things like manually poking the Root Port via "setpci" for debug. And
if it has a connector, the generic pciehp should be able to handle
hot-add events without any special help from the host bridge driver.

On ACPI systems there is no concept of the host bridge driver probe
failing because of lack of link on a Root Port. If a Root Port doesn't
have an operational link, we still keep the pci_root.c driver, and
we'll enumerate the Root Port itself. So I tend to think DT systems
should behave the same way, i.e., the driver probe should succeed
unless it fails to allocate resources or something similar.

I think this is analogous to a NIC or USB adapter driver, where the
probe succeeds even if there's no network cable or USB device attached.

Bjorn

Thanks Bjorn for your valuable inputs. I hope we are good here to not
power down host even if there are no endpoints detected.

- Vidya Sagar
Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On Tue, Jul 16, 2019 at 12:22:25PM +0100, Lorenzo Pieralisi wrote:
> On Sat, Jul 13, 2019 at 12:34:34PM +0530, Vidya Sagar wrote:
> > > > > So if the link is not up we still go ahead and make probe
> > > > > succeed. What for ?
> > > > We may need root port to be available to support hot-plugging of
> > > > endpoint devices, so, we don't fail the probe.
> > >
> > > We need it or we don't. If you do support hotplugging of endpoint
> > > devices point me at the code, otherwise link up failure means
> > > failure to probe.
> > Currently hotplugging of endpoint is not supported, but it is one of
> > the use cases that we may add support for in future.
>
> You should elaborate on this, I do not understand what you mean,
> either the root port(s) supports hotplug or it does not.
>
> > But, why should we fail probe if link up doesn't happen? As such,
> > nothing went wrong in terms of root port initialization right? I
> > checked other DWC based implementations and following are not failing
> > the probe pci-dra7xx.c, pcie-armada8k.c, pcie-artpec6.c, pcie-histb.c,
> > pcie-kirin.c, pcie-spear13xx.c, pci-exynos.c, pci-imx6.c,
> > pci-keystone.c, pci-layerscape.c
> >
> > Although following do fail the probe if link is not up. pcie-qcom.c,
> > pcie-uniphier.c, pci-meson.c
> >
> > So, to me, it looks more like a choice we can make whether to fail the
> > probe or not and in this case we are choosing not to fail.
>
> I disagree. I had an offline chat with Bjorn and whether link-up should
> fail the probe or not depends on whether the root port(s) is hotplug
> capable or not and this in turn relies on the root port "Slot
> implemented" bit in the PCI Express capabilities register.

There might be a little more we can talk about in this regard. I did
bring up the "Slot implemented" bit, but after thinking about it more,
I don't really think the host bridge driver should be looking at that.
That's a PCIe concept, and it's really *downstream* from the host
bridge itself. The host bridge is logically a device on the CPU bus,
not the PCI bus.

I'm starting to think that the host bridge driver probe should be
disconnected from question of whether the root port links are up.
Logically, the host bridge driver connects the CPU bus to a PCI root
bus, so it converts CPU-side accesses to PCI config, memory, or I/O
port transactions.

Given that, the PCI core can enumerate devices on the root bus and
downstream buses. Devices on the root bus typically include Root Ports,
but might also include endpoints, Root Complex Integrated Endpoints,
Root Complex Event Collectors, etc. I think in principle, we would want
the host bridge probe to succeed so we can use these devices even if
none of the Root Ports have a link.

If a Root Port is present, I think users will expect to see it in the
"lspci" output, even if its downstream link is not up. That will enable
things like manually poking the Root Port via "setpci" for debug. And
if it has a connector, the generic pciehp should be able to handle
hot-add events without any special help from the host bridge driver.

On ACPI systems there is no concept of the host bridge driver probe
failing because of lack of link on a Root Port. If a Root Port doesn't
have an operational link, we still keep the pci_root.c driver, and
we'll enumerate the Root Port itself. So I tend to think DT systems
should behave the same way, i.e., the driver probe should succeed
unless it fails to allocate resources or something similar.

I think this is analogous to a NIC or USB adapter driver, where the
probe succeeds even if there's no network cable or USB device attached.

Bjorn
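For reference, the "Slot implemented" bit lives in the PCI Express Capabilities register of the port; reading it on a DWC-based root port could look like the sketch below (whether a host bridge driver should consult it at all is exactly what is argued above):

/* Sketch: "Slot implemented" indicates the downstream port is
 * attached to a slot -- the bit the hotplug argument hinges on. */
u16 flags = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_FLAGS);
bool has_slot = flags & PCI_EXP_FLAGS_SLOT;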
Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On Sat, Jul 13, 2019 at 12:34:34PM +0530, Vidya Sagar wrote: [...] > > > > > +static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, > > > > > + bool enable) > > > > > +{ > > > > > + struct mrq_uphy_response resp; > > > > > + struct tegra_bpmp_message msg; > > > > > + struct mrq_uphy_request req; > > > > > + int err; > > > > > + > > > > > + if (pcie->cid == 5) > > > > > + return 0; > > > > > > > > What's wrong with cid == 5 ? Explain please. > > > Controller with ID=5 doesn't need any programming to enable it which is > > > done here through calling firmware API. > > > > > > > > > > > > + memset(&req, 0, sizeof(req)); > > > > > + memset(&resp, 0, sizeof(resp)); > > > > > + > > > > > + req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE; > > > > > + req.controller_state.pcie_controller = pcie->cid; > > > > > + req.controller_state.enable = enable; > > > > > + > > > > > + memset(&msg, 0, sizeof(msg)); > > > > > + msg.mrq = MRQ_UPHY; > > > > > + msg.tx.data = &req; > > > > > + msg.tx.size = sizeof(req); > > > > > + msg.rx.data = &resp; > > > > > + msg.rx.size = sizeof(resp); > > > > > + > > > > > + if (irqs_disabled()) > > > > > > > > Can you explain to me what this check is meant to achieve please ? > > > Firmware interface provides different APIs to be called when there are > > > no interrupts enabled in the system (noirq context) and otherwise > > > hence checking that situation here and calling appropriate API. > > > > That's what I am questioning. Being called from {suspend/resume}_noirq() > > callbacks (if that's the code path this check caters for) does not mean > > irqs_disabled() == true. > Agree. > Actually, I got a hint of having this check from the following. > Both tegra_bpmp_transfer_atomic() and tegra_bpmp_transfer() are indirectly > called by APIs registered with .master_xfer() and .master_xfer_atomic() hooks > of > struct i2c_algorithm and the decision to call which one of these is made > using the > following check in i2c-core.h file. > static inline bool i2c_in_atomic_xfer_mode(void) > { > return system_state > SYSTEM_RUNNING && irqs_disabled(); > } > I think I should use this condition as is IIUC. > Please let me know if there are any concerns with this. It is not a concern, it is just that I don't understand how this code can be called with IRQs disabled, if you can give me an execution path I am happy to leave the check there. On top of that, when called from suspend NOIRQ context, it is likely to use the blocking API (because IRQs aren't disabled at CPU level) behind which there is most certainly an IRQ required to wake the thread up and if the IRQ in question was disabled in the suspend NOIRQ phase this code is likely to deadlock. I want to make sure we can justify adding this check, I do not want to add it because we think it can be needed when it may not be needed at all (and it gets copy and pasted over and over again in other drivers). > > Actually, if tegra_bpmp_transfer() requires IRQs to be enabled you may > > even end up in a situation where that blocking call does not wake up > > because the IRQ in question was disabled in the NOIRQ suspend/resume > > phase. > > > > [...] 
> > > > > > > +static int tegra_pcie_dw_probe(struct platform_device *pdev) > > > > > +{ > > > > > + const struct tegra_pcie_soc *data; > > > > > + struct device *dev = &pdev->dev; > > > > > + struct resource *atu_dma_res; > > > > > + struct tegra_pcie_dw *pcie; > > > > > + struct resource *dbi_res; > > > > > + struct pcie_port *pp; > > > > > + struct dw_pcie *pci; > > > > > + struct phy **phys; > > > > > + char *name; > > > > > + int ret; > > > > > + u32 i; > > > > > + > > > > > + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); > > > > > + if (!pcie) > > > > > + return -ENOMEM; > > > > > + > > > > > + pci = &pcie->pci; > > > > > + pci->dev = &pdev->dev; > > > > > + pci->ops = &tegra_dw_pcie_ops; > > > > > + pp = &pci->pp; > > > > > + pcie->dev = &pdev->dev; > > > > > + > > > > > + data = (struct tegra_pcie_soc *)of_device_get_match_data(dev); > > > > > + if (!data) > > > > > + return -EINVAL; > > > > > + pcie->mode = (enum dw_pcie_device_mode)data->mode; > > > > > + > > > > > + ret = tegra_pcie_dw_parse_dt(pcie); > > > > > + if (ret < 0) { > > > > > + dev_err(dev, "Failed to parse device tree: %d\n", ret); > > > > > + return ret; > > > > > + } > > > > > + > > > > > + pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl"); > > > > > + if (IS_ERR(pcie->pex_ctl_supply)) { > > > > > + dev_err(dev, "Failed to get regulator: %ld\n", > > > > > + PTR_ERR(pcie->pex_ctl_supply)); > > > > > + return PTR_ERR(pcie->pex_ctl_supply); > > > > > + } > > > > > + > > > > > +
Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On 7/12/2019 9:37 PM, Lorenzo Pieralisi wrote:

On Fri, Jul 12, 2019 at 09:02:49PM +0530, Vidya Sagar wrote:

[...]

+static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg)
+{
+	struct tegra_pcie_dw *pcie = arg;
+
+	if (pcie->mode == DW_PCIE_RC_TYPE)
+		return tegra_pcie_rp_irq_handler(pcie);

What's the point of registering the handler if mode != DW_PCIE_RC_TYPE ?

Currently this driver supports only root port mode but we have a plan
to add support for endpoint mode (as Tegra194 has dual mode controllers)
also in future and when that happens, we'll have a corresponding
tegra_pcie_ep_irq_handler() to take care of ep specific interrupts.

Sure, that's why you should add tegra_pcie_dw->mode when it is needed,
not in this patch.

Ok.

[...]

+static int tegra_pcie_dw_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 val, tmp, offset, speed;
+	unsigned int count;
+	u16 val_w;
+
+core_init:

I think it would be cleaner to include all registers programming within
a function and we remove this label (and goto) below.

Some background: As per spec r4.0 v1.0, downstream ports that support
16.0 GT/s must support Scaled Flow Control (sec 3.4.2) and Tegra194's
downstream ports being 16.0 GT/s capable, enable scaled flow control by
having Data Link Feature (sec 7.7.4) enabled by default. There is one
endpoint (ASMedia USB3.0 controller) that doesn't link up with root
port because of this (i.e. DLF being enabled). The way we are detecting
this situation is to check for partial linkup i.e. one of application
logic registers (i.e. from "appl" region) says link is up but the same
is not reflected in configuration space of root port. Recommendation
from our hardware team in this situation is to disable DLF in root port
and try link up with endpoint again. To achieve this, we put the core
through reset cycle, disable DLF and proceed with configuring all other
registers and check for link up.

Initially in Patch-1, I didn't have goto statement but a recursion with
depth-1 (as the above situation occurs only once). It was reviewed @
http://patchwork.ozlabs.org/patch/1065707/ and Thierry said it would be
simpler to use a goto instead of calling the same function again. So, I
modified the code accordingly. Please do let me know if you strongly
feel we should call tegra_pcie_dw_host_init() instead of goto here.
I'll change it.

I did not say we should call tegra_pcie_dw_host_init(), sorry for not
being clear. What I asked is factoring out registers programming in a
function and call it where core_init: label is and call it again if DLF
enablement causes link up to fail.

Ok.

[...]

+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+					   bool enable)
+{
+	struct mrq_uphy_response resp;
+	struct tegra_bpmp_message msg;
+	struct mrq_uphy_request req;
+	int err;
+
+	if (pcie->cid == 5)
+		return 0;

What's wrong with cid == 5 ? Explain please.

Controller with ID=5 doesn't need any programming to enable it which is
done here through calling firmware API.

+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
+	req.controller_state.pcie_controller = pcie->cid;
+	req.controller_state.enable = enable;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_UPHY;
+	msg.tx.data = &req;
+	msg.tx.size = sizeof(req);
+	msg.rx.data = &resp;
+	msg.rx.size = sizeof(resp);
+
+	if (irqs_disabled())

Can you explain to me what this check is meant to achieve please ?

Firmware interface provides different APIs to be called when there are
no interrupts enabled in the system (noirq context) and otherwise hence
checking that situation here and calling appropriate API.

That's what I am questioning. Being called from {suspend/resume}_noirq()
callbacks (if that's the code path this check caters for) does not mean
irqs_disabled() == true.

Agree. Actually, I got a hint of having this check from the following.
Both tegra_bpmp_transfer_atomic() and tegra_bpmp_transfer() are
indirectly called by APIs registered with .master_xfer() and
.master_xfer_atomic() hooks of struct i2c_algorithm and the decision to
call which one of these is made using the following check in i2c-core.h
file.

static inline bool i2c_in_atomic_xfer_mode(void)
{
	return system_state > SYSTEM_RUNNING && irqs_disabled();
}

I think I should use this condition as is IIUC.
Please let me know if there are any concerns with this.

Actually, if tegra_bpmp_transfer() requires IRQs to be enabled you may
even end up in a situation where that blocking call does not wake up
because the IRQ in question was disabled in the NOIRQ suspend/resume
phase.

[...]

+static int tegra_pcie_dw_probe(struct platfor
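The refactoring Lorenzo asks for, with the register programming pulled into a helper that is called once where the core_init: label was and once more when DLF has to be disabled, might be shaped like this sketch (the link-check and DLF helpers are hypothetical):

/* Sketch: replace the core_init: label/goto with a helper that is
 * simply called a second time for the DLF work-around. */
static void tegra_pcie_prepare_host(struct pcie_port *pp)
{
	/* all register programming previously under core_init: */
}

static int tegra_pcie_dw_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	tegra_pcie_prepare_host(pp);

	if (tegra_pcie_link_partial(pcie)) {	/* hypothetical check */
		tegra_pcie_disable_dlf(pcie);	/* reset core, clear DLF */
		tegra_pcie_prepare_host(pp);	/* retry link up, no goto */
	}

	return 0;
}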
Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On Fri, Jul 12, 2019 at 09:02:49PM +0530, Vidya Sagar wrote: [...] > > > +static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg) > > > +{ > > > + struct tegra_pcie_dw *pcie = arg; > > > + > > > + if (pcie->mode == DW_PCIE_RC_TYPE) > > > + return tegra_pcie_rp_irq_handler(pcie); > > > > What's the point of registering the handler if mode != DW_PCIE_RC_TYPE ? > Currently this driver supports only root port mode but we have a plan > to add support for endpoint mode (as Tegra194 as dual mode > controllers) also in future and when that happens, we'll have a > corresponding tegra_pcie_ep_irq_handler() to take care of ep specific > interrupts. Sure, that's why you should add tegra_pcie_dw->mode when it is needed, not in this patch. [...] > > > +static int tegra_pcie_dw_host_init(struct pcie_port *pp) > > > +{ > > > + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); > > > + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); > > > + u32 val, tmp, offset, speed; > > > + unsigned int count; > > > + u16 val_w; > > > + > > > +core_init: > > > > I think it would be cleaner to include all registers programming > > within a function and we remove this label (and goto) below. > Some background: As per spec r4.0 v1.0, downstream ports that support > 16.0 GT/s must support Scaled Flow Control (sec 3.4.2) and Tegra194's > downstream ports being 16.0 GT/s capable, enable scaled flow control > by having Data Link Feature (sec 7.7.4) enabled by default. There is > one endpoint (ASMedia USB3.0 controller) that doesn't link up with > root port because of this (i.e. DLF being enabled). The way we are > detecting this situation is to check for partial linkup i.e. one of > application logic registers (i.e. from "appl" region) says link is up > but the same is not reflected in configuration space of root port. > Recommendation from our hardware team in this situation is to disable > DLF in root port and try link up with endpoint again. To achieve > this, we put the core through reset cycle, disable DLF and proceed > with configuring all other registers and check for link up. > > Initially in Patch-1, I didn't have goto statement but a recursion > with depth-1 (as the above situation occurs only once). It was > reviewed @ http://patchwork.ozlabs.org/patch/1065707/ and Thierry said > it would be simpler to use a goto instead of calling the same function > again. So, I modified the code accordingly. Please do let me know if > you strongly feel we should call tegra_pcie_dw_host_init() instead of > goto here. I'll change it. I did not say we should call tegra_pcie_dw_host_init(), sorry for not being clear. What I asked is factoring out registers programming in a function and call it where core_init: label is and call it again if DLF enablement causes link up to fail. [...] > > > +static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, > > > + bool enable) > > > +{ > > > + struct mrq_uphy_response resp; > > > + struct tegra_bpmp_message msg; > > > + struct mrq_uphy_request req; > > > + int err; > > > + > > > + if (pcie->cid == 5) > > > + return 0; > > > > What's wrong with cid == 5 ? Explain please. > Controller with ID=5 doesn't need any programming to enable it which is > done here through calling firmware API. 
> > > > > > + memset(&req, 0, sizeof(req)); > > > + memset(&resp, 0, sizeof(resp)); > > > + > > > + req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE; > > > + req.controller_state.pcie_controller = pcie->cid; > > > + req.controller_state.enable = enable; > > > + > > > + memset(&msg, 0, sizeof(msg)); > > > + msg.mrq = MRQ_UPHY; > > > + msg.tx.data = &req; > > > + msg.tx.size = sizeof(req); > > > + msg.rx.data = &resp; > > > + msg.rx.size = sizeof(resp); > > > + > > > + if (irqs_disabled()) > > > > Can you explain to me what this check is meant to achieve please ? > Firmware interface provides different APIs to be called when there are > no interrupts enabled in the system (noirq context) and otherwise > hence checking that situation here and calling appropriate API. That's what I am questioning. Being called from {suspend/resume}_noirq() callbacks (if that's the code path this check caters for) does not mean irqs_disabled() == true. Actually, if tegra_bpmp_transfer() requires IRQs to be enabled you may even end up in a situation where that blocking call does not wake up because the IRQ in question was disabled in the NOIRQ suspend/resume phase. [...] > > > +static int tegra_pcie_dw_probe(struct platform_device *pdev) > > > +{ > > > + const struct tegra_pcie_soc *data; > > > + struct device *dev = &pdev->dev; > > > + struct resource *atu_dma_res; > > > + struct tegra_pcie_dw *pcie; > > > + struct resource *dbi_res; > > > + struct pcie_port *pp; > > > + struct dw_pcie *pci; > > > + struct phy **phys; > > > + char *name; > > > + int ret; > > > + u32 i; > > > + > > > + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); > > > + if (!pcie) > > > + retur
Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On 7/11/2019 6:24 PM, Lorenzo Pieralisi wrote:

On Wed, Jul 10, 2019 at 11:52:12AM +0530, Vidya Sagar wrote:

[...]

diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c
b/drivers/pci/controller/dwc/pcie-tegra194.c
new file mode 100644
index ..189779edd976
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -0,0 +1,1632 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for Tegra194 SoC
+ *
+ * Copyright (C) 2019 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include

What do you need these two headers for ?

These were added for a different reason. They are not required and
I'll remove them.

[...]

+struct tegra_pcie_dw {
+	struct device *dev;
+	struct resource *appl_res;
+	struct resource *dbi_res;
+	struct resource *atu_dma_res;
+	void __iomem *appl_base;
+	struct clk *core_clk;
+	struct reset_control *core_apb_rst;
+	struct reset_control *core_rst;
+	struct dw_pcie pci;
+	enum dw_pcie_device_mode mode;
+	struct tegra_bpmp *bpmp;
+
+	bool supports_clkreq;
+	bool enable_cdm_check;
+	u8 init_link_width;
+	bool link_state;
+	u32 msi_ctrl_int;
+	u32 num_lanes;
+	u32 max_speed;
+	u32 cid;
+	bool update_fc_fixup;
+	u32 cfg_link_cap_l1sub;
+	u32 pcie_cap_base;
+	u32 aspm_cmrt;
+	u32 aspm_pwr_on_t;
+	u32 aspm_l0s_enter_lat;

Nit: You should try to group variables according to their usage, it
would make sense to group booleans together.

Ok.

+	struct regulator *pex_ctl_supply;
+
+	unsigned int phy_count;
+	struct phy **phys;
+
+	struct dentry *debugfs;
+};
+
+static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
+{
+	return container_of(pci, struct tegra_pcie_dw, pci);
+}
+
+static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
+			       const u32 reg)
+{
+	writel_relaxed(value, pcie->appl_base + reg);
+}
+
+static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
+{
+	return readl_relaxed(pcie->appl_base + reg);
+}
+
+struct tegra_pcie_soc {
+	enum dw_pcie_device_mode mode;
+};
+
+static void apply_bad_link_workaround(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 current_link_width;
+	u16 val;
+
+	/*
+	 * NOTE:- Since this scenario is uncommon and link as such is not
+	 * stable anyway, not waiting to confirm if link is really
+	 * transitioning to Gen-2 speed
+	 */
+	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+	if (val & PCI_EXP_LNKSTA_LBMS) {
+		current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
+				     PCI_EXP_LNKSTA_NLW_SHIFT;
+		if (pcie->init_link_width > current_link_width) {
+			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
+			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+						PCI_EXP_LNKCTL2);
+			val &= ~PCI_EXP_LNKCTL2_TLS;
+			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+					   PCI_EXP_LNKCTL2, val);
+
+			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+						PCI_EXP_LNKCTL);
+			val |= PCI_EXP_LNKCTL_RL;
+			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+					   PCI_EXP_LNKCTL, val);
+		}
+	}
+}
+
+static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
+{
+	struct dw_pcie *pci = &pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	u32 val, tmp;
+	u16 val_w;
+
+	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
+	if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+		val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+		if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
+			appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
+
+			/* SBR & Surprise Link Down WAR */
+			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
+			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
+			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
+			udelay(1);
+			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
+			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
+			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
+
Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On Wed, Jul 10, 2019 at 11:52:12AM +0530, Vidya Sagar wrote:

[...]

> diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
> new file mode 100644
> index ..189779edd976
> --- /dev/null
> +++ b/drivers/pci/controller/dwc/pcie-tegra194.c
> @@ -0,0 +1,1632 @@
> +// SPDX-License-Identifier: GPL-2.0+
> +/*
> + * PCIe host controller driver for Tegra194 SoC
> + *
> + * Copyright (C) 2019 NVIDIA Corporation.
> + *
> + * Author: Vidya Sagar
> + */
> +
> +#include
> +#include
> +#include
> +#include
> +#include
> +#include
> +#include
> +#include
> +#include

What do you need these two headers for ?

[...]

> +struct tegra_pcie_dw {
> +	struct device *dev;
> +	struct resource *appl_res;
> +	struct resource *dbi_res;
> +	struct resource *atu_dma_res;
> +	void __iomem *appl_base;
> +	struct clk *core_clk;
> +	struct reset_control *core_apb_rst;
> +	struct reset_control *core_rst;
> +	struct dw_pcie pci;
> +	enum dw_pcie_device_mode mode;
> +	struct tegra_bpmp *bpmp;
> +
> +	bool supports_clkreq;
> +	bool enable_cdm_check;
> +	u8 init_link_width;
> +	bool link_state;
> +	u32 msi_ctrl_int;
> +	u32 num_lanes;
> +	u32 max_speed;
> +	u32 cid;
> +	bool update_fc_fixup;
> +	u32 cfg_link_cap_l1sub;
> +	u32 pcie_cap_base;
> +	u32 aspm_cmrt;
> +	u32 aspm_pwr_on_t;
> +	u32 aspm_l0s_enter_lat;

Nit: You should try to group variables according to their usage, it
would make sense to group booleans together.

> +	struct regulator *pex_ctl_supply;
> +
> +	unsigned int phy_count;
> +	struct phy **phys;
> +
> +	struct dentry *debugfs;
> +};
> +
> +static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
> +{
> +	return container_of(pci, struct tegra_pcie_dw, pci);
> +}
> +
> +static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
> +			       const u32 reg)
> +{
> +	writel_relaxed(value, pcie->appl_base + reg);
> +}
> +
> +static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
> +{
> +	return readl_relaxed(pcie->appl_base + reg);
> +}
> +
> +struct tegra_pcie_soc {
> +	enum dw_pcie_device_mode mode;
> +};
> +
> +static void apply_bad_link_workaround(struct pcie_port *pp)
> +{
> +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
> +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
> +	u32 current_link_width;
> +	u16 val;
> +
> +	/*
> +	 * NOTE:- Since this scenario is uncommon and link as such is not
> +	 * stable anyway, not waiting to confirm if link is really
> +	 * transitioning to Gen-2 speed
> +	 */
> +	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
> +	if (val & PCI_EXP_LNKSTA_LBMS) {
> +		current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
> +				     PCI_EXP_LNKSTA_NLW_SHIFT;
> +		if (pcie->init_link_width > current_link_width) {
> +			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
> +			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
> +						PCI_EXP_LNKCTL2);
> +			val &= ~PCI_EXP_LNKCTL2_TLS;
> +			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
> +			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
> +					   PCI_EXP_LNKCTL2, val);
> +
> +			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
> +						PCI_EXP_LNKCTL);
> +			val |= PCI_EXP_LNKCTL_RL;
> +			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
> +					   PCI_EXP_LNKCTL, val);
> +		}
> +	}
> +}
> +
> +static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
> +{
> +	struct dw_pcie *pci = &pcie->pci;
> +	struct pcie_port *pp = &pci->pp;
> +	u32 val, tmp;
> +	u16 val_w;
> +
> +	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
> +	if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
> +		val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
> +		if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
> +			appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
> +
> +			/* SBR & Surprise Link Down WAR */
> +			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
> +			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
> +			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
> +			udelay(1);
> +			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
> +			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
> +			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
> +
> +			val = dw_pcie_readl_dbi(pci, PORT_L
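To make the grouping nit above concrete, one possible reordering of the
structure members (purely illustrative, reusing the field names from the
patch; the exact layout is for Vidya to decide):

	struct tegra_pcie_dw {
		...
		/* Booleans grouped together, per the review comment */
		bool supports_clkreq;
		bool enable_cdm_check;
		bool link_state;
		bool update_fc_fixup;

		u8 init_link_width;

		u32 msi_ctrl_int;
		u32 num_lanes;
		u32 max_speed;
		u32 cid;
		u32 cfg_link_cap_l1sub;
		u32 pcie_cap_base;
		u32 aspm_cmrt;
		u32 aspm_pwr_on_t;
		u32 aspm_l0s_enter_lat;
		...
	};

Besides readability, placing the four bool members next to the u8 lets
them share a single aligned word instead of scattering padding through
the structure, though the saving here is small.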
Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On 7/10/2019 10:32 PM, Lorenzo Pieralisi wrote:
> On Wed, Jul 10, 2019 at 11:52:12AM +0530, Vidya Sagar wrote:
>
> [...]
>
> > +#if defined(CONFIG_PCIEASPM)
> > +static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
> > +{
> > +	u32 val;
> > +
> > +	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
> > +	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
> > +	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
> > +}
> > +
> > +static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
> > +{
> > +	u32 val;
> > +
> > +	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
> > +	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
> > +	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
> > +}
> > +
> > +static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
> > +{
> > +	u32 val;
> > +
> > +	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
> > +	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
> > +	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
> > +	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
> > +	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
> > +	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
> > +	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);
> > +	return val;
> > +}
> > +
> > +static int aspm_state_cnt(struct seq_file *s, void *data)
> > +{
> > +	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
> > +				     dev_get_drvdata(s->private);
> > +	u32 val;
> > +
> > +	seq_printf(s, "Tx L0s entry count : %u\n",
> > +		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));
> > +
> > +	seq_printf(s, "Rx L0s entry count : %u\n",
> > +		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));
> > +
> > +	seq_printf(s, "Link L1 entry count : %u\n",
> > +		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));
> > +
> > +	seq_printf(s, "Link L1.1 entry count : %u\n",
> > +		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));
> > +
> > +	seq_printf(s, "Link L1.2 entry count : %u\n",
> > +		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
> > +
> > +	/* Clear all counters */
> > +	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
> > +			   EVENT_COUNTER_ALL_CLEAR);
> > +
> > +	/* Re-enable counting */
> > +	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
> > +	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
> > +	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
> > +
> > +	return 0;
> > +}
> > +#endif
> > +
> > +static int init_debugfs(struct tegra_pcie_dw *pcie)
> > +{
> > +#if defined(CONFIG_PCIEASPM)
> > +	struct dentry *d;
> > +
> > +	d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt",
> > +					pcie->debugfs, aspm_state_cnt);
> > +	if (IS_ERR_OR_NULL(d))
> > +		dev_err(pcie->dev,
> > +			"Failed to create debugfs file \"aspm_state_cnt\"\n");
> > +#endif
> > +	return 0;
> > +}
>
> I prefer writing it:
>
> #if defined(CONFIG_PCIEASPM)
> static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
> {
> 	...
> }
>
> static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
> {
> 	...
> }
>
> static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
> {
> 	...
> }
>
> static int aspm_state_cnt(struct seq_file *s, void *data)
> {
> 	...
> }
>
> static int init_debugfs(struct tegra_pcie_dw *pcie)
> {
> 	struct dentry *d;
>
> 	d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt",
> 					pcie->debugfs, aspm_state_cnt);
> 	if (IS_ERR_OR_NULL(d))
> 		dev_err(pcie->dev,
> 			"Failed to create debugfs file \"aspm_state_cnt\"\n");
>
> 	return 0;
> }
> #else
> static inline int init_debugfs(struct tegra_pcie_dw *pcie) { return 0; }
> #endif

I'll address it in the next patch.

> > +
> > +static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
> > +{
> > +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
> > +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
> > +	u32 val;
> > +	u16 val_w;
> > +
> > +	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
> > +	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
> > +	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
> > +
> > +	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
> > +	val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
> > +	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
> > +
> > +	if (pcie->enable_cdm_check) {
> > +		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
> > +		val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
> > +		appl_writel(pcie, val, APPL_INTR_EN_L0_0);
> > +
> > +		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
> > +		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
> > +		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
> > +		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
> > +	}
> > +
> > +	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
> > +
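For readers following the debugfs part of this exchange:
debugfs_create_devm_seqfile() binds a seq_file show callback to a
device-managed debugfs file and passes the struct device through
s->private, which is why aspm_state_cnt() above can recover the driver
state with dev_get_drvdata(). A minimal sketch of that same pattern,
with invented foo_* names that are not from the patch:

	#include <linux/debugfs.h>
	#include <linux/device.h>
	#include <linux/seq_file.h>

	struct foo_priv {
		u32 state;
	};

	/* s->private is the struct device * that was handed to
	 * debugfs_create_devm_seqfile(), so drvdata gives back the
	 * driver's private state.
	 */
	static int foo_state_show(struct seq_file *s, void *data)
	{
		struct foo_priv *priv = dev_get_drvdata(s->private);

		seq_printf(s, "state: %u\n", priv->state);
		return 0;
	}

	static void foo_init_debugfs(struct device *dev, struct dentry *parent)
	{
		/* Assumes dev_set_drvdata(dev, priv) was done in probe;
		 * the file is torn down automatically with the device,
		 * so no explicit cleanup is needed.
		 */
		debugfs_create_devm_seqfile(dev, "foo_state", parent,
					    foo_state_show);
	}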
Re: [PATCH V13 12/12] PCI: tegra: Add Tegra194 PCIe support
On Wed, Jul 10, 2019 at 11:52:12AM +0530, Vidya Sagar wrote:

[...]

> +#if defined(CONFIG_PCIEASPM)
> +static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
> +{
> +	u32 val;
> +
> +	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
> +	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
> +	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
> +}
> +
> +static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
> +{
> +	u32 val;
> +
> +	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
> +	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
> +	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
> +}
> +
> +static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
> +{
> +	u32 val;
> +
> +	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
> +	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
> +	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
> +	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
> +	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
> +	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
> +	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);
> +	return val;
> +}
> +
> +static int aspm_state_cnt(struct seq_file *s, void *data)
> +{
> +	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
> +				     dev_get_drvdata(s->private);
> +	u32 val;
> +
> +	seq_printf(s, "Tx L0s entry count : %u\n",
> +		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));
> +
> +	seq_printf(s, "Rx L0s entry count : %u\n",
> +		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));
> +
> +	seq_printf(s, "Link L1 entry count : %u\n",
> +		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));
> +
> +	seq_printf(s, "Link L1.1 entry count : %u\n",
> +		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));
> +
> +	seq_printf(s, "Link L1.2 entry count : %u\n",
> +		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
> +
> +	/* Clear all counters */
> +	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
> +			   EVENT_COUNTER_ALL_CLEAR);
> +
> +	/* Re-enable counting */
> +	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
> +	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
> +	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
> +
> +	return 0;
> +}
> +#endif
> +
> +static int init_debugfs(struct tegra_pcie_dw *pcie)
> +{
> +#if defined(CONFIG_PCIEASPM)
> +	struct dentry *d;
> +
> +	d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt",
> +					pcie->debugfs, aspm_state_cnt);
> +	if (IS_ERR_OR_NULL(d))
> +		dev_err(pcie->dev,
> +			"Failed to create debugfs file \"aspm_state_cnt\"\n");
> +#endif
> +	return 0;
> +}

I prefer writing it:

#if defined(CONFIG_PCIEASPM)
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
	...
}

static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
{
	...
}

static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
	...
}

static int aspm_state_cnt(struct seq_file *s, void *data)
{
	...
}

static int init_debugfs(struct tegra_pcie_dw *pcie)
{
	struct dentry *d;

	d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt",
					pcie->debugfs, aspm_state_cnt);
	if (IS_ERR_OR_NULL(d))
		dev_err(pcie->dev,
			"Failed to create debugfs file \"aspm_state_cnt\"\n");

	return 0;
}
#else
static inline int init_debugfs(struct tegra_pcie_dw *pcie) { return 0; }
#endif

> +
> +static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
> +{
> +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
> +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
> +	u32 val;
> +	u16 val_w;
> +
> +	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
> +	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
> +	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
> +
> +	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
> +	val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
> +	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
> +
> +	if (pcie->enable_cdm_check) {
> +		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
> +		val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
> +		appl_writel(pcie, val, APPL_INTR_EN_L0_0);
> +
> +		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
> +		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
> +		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
> +		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
> +	}
> +
> +	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
> +
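The practical payoff of the #else stub in the layout suggested above is
that call sites need no conditional compilation of their own. A short
sketch of what that buys; the wrapper function here is only an assumed
example standing in for wherever the driver ends up calling
init_debugfs():

	/* With CONFIG_PCIEASPM=n, init_debugfs() resolves to the
	 * static inline stub returning 0, so this call compiles away
	 * entirely and no #ifdef is needed at the call site.
	 */
	static void tegra_pcie_setup_debugfs(struct tegra_pcie_dw *pcie)
	{
		init_debugfs(pcie);
	}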