This patch adds struct plic_hw to represent global PLIC HW details.

Currently, these details are only used in plic_init(), but in the future
they will be useful for implementing PM suspend and resume callbacks.

Further, these global details are useful debug information about the HW,
so let's not throw them away once plic_init() is done with them.
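
As a rough illustration only (not part of this patch), PM callbacks could
consume the consolidated state along the lines of the sketch below. The
plic_suspend()/plic_resume() names, the plic_priority_save array, and the
syscore_ops hookup are all hypothetical; only plic.regs, plic.nr_irqs,
PRIORITY_BASE and PRIORITY_PER_ID come from the driver:

/* Hypothetical sketch only, not part of this patch. */
#include <linux/syscore_ops.h>

static u32 *plic_priority_save;	/* assumed to be allocated in plic_init() */

static int plic_suspend(void)
{
	int hwirq;

	/* Save per-interrupt priorities using the consolidated HW details. */
	for (hwirq = 1; hwirq <= plic.nr_irqs; hwirq++)
		plic_priority_save[hwirq] = readl(plic.regs +
				PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
	return 0;
}

static void plic_resume(void)
{
	int hwirq;

	/* Restore per-interrupt priorities on resume. */
	for (hwirq = 1; hwirq <= plic.nr_irqs; hwirq++)
		writel(plic_priority_save[hwirq], plic.regs +
				PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
}

static struct syscore_ops plic_syscore_ops = {
	.suspend	= plic_suspend,
	.resume		= plic_resume,
};

/* register_syscore_ops(&plic_syscore_ops) would then be called from plic_init(). */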

Signed-off-by: Anup Patel <a...@brainfault.org>
---
 drivers/irqchip/irq-sifive-plic.c | 59 +++++++++++++++++--------------
 1 file changed, 33 insertions(+), 26 deletions(-)

diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index c23a293a2aae..48bee877e0f1 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -55,8 +55,6 @@
 #define     CONTEXT_THRESHOLD          0x00
 #define     CONTEXT_CLAIM              0x04
 
-static void __iomem *plic_regs;
-
 struct plic_handler {
        bool                    present;
        void __iomem            *hart_base;
@@ -67,8 +65,19 @@ struct plic_handler {
        raw_spinlock_t          enable_lock;
        void __iomem            *enable_base;
 };
+
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
 
+struct plic_hw {
+       u32                     nr_irqs;
+       u32                     nr_handlers;
+       u32                     nr_mapped;
+       void __iomem            *regs;
+       struct irq_domain       *irqdomain;
+};
+
+static struct plic_hw plic;
+
 static inline void plic_toggle(struct plic_handler *handler,
                                int hwirq, int enable)
 {
@@ -87,7 +96,7 @@ static inline void plic_irq_toggle(struct irq_data *d, int enable)
 {
        int cpu;
 
-       writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+       writel(enable, plic.regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
        for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
                struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
 
@@ -130,8 +139,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
        .xlate          = irq_domain_xlate_onecell,
 };
 
-static struct irq_domain *plic_irqdomain;
-
 /*
  * Handling an interrupt is a two-step process: first you claim the interrupt
  * by reading the claim register, then you complete the interrupt by writing
@@ -148,7 +155,7 @@ static void plic_handle_irq(struct pt_regs *regs)
 
        csr_clear(sie, SIE_SEIE);
        while ((hwirq = readl(claim))) {
-               int irq = irq_find_mapping(plic_irqdomain, hwirq);
+               int irq = irq_find_mapping(plic.irqdomain, hwirq);
 
                if (unlikely(irq <= 0))
                        pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
@@ -177,36 +184,35 @@ static int plic_find_hart_id(struct device_node *node)
 static int __init plic_init(struct device_node *node,
                struct device_node *parent)
 {
-       int error = 0, nr_handlers, nr_mapped = 0, i;
-       u32 nr_irqs;
+       int error = 0, i;
 
-       if (plic_regs) {
+       if (plic.regs) {
                pr_warn("PLIC already present.\n");
                return -ENXIO;
        }
 
-       plic_regs = of_iomap(node, 0);
-       if (WARN_ON(!plic_regs))
+       plic.regs = of_iomap(node, 0);
+       if (WARN_ON(!plic.regs))
                return -EIO;
 
        error = -EINVAL;
-       of_property_read_u32(node, "riscv,ndev", &nr_irqs);
-       if (WARN_ON(!nr_irqs))
+       of_property_read_u32(node, "riscv,ndev", &plic.nr_irqs);
+       if (WARN_ON(!plic.nr_irqs))
                goto out_iounmap;
 
-       nr_handlers = of_irq_count(node);
-       if (WARN_ON(!nr_handlers))
+       plic.nr_handlers = of_irq_count(node);
+       if (WARN_ON(!plic.nr_handlers))
                goto out_iounmap;
-       if (WARN_ON(nr_handlers < num_possible_cpus()))
+       if (WARN_ON(plic.nr_handlers < num_possible_cpus()))
                goto out_iounmap;
 
        error = -ENOMEM;
-       plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
-                       &plic_irqdomain_ops, NULL);
-       if (WARN_ON(!plic_irqdomain))
+       plic.irqdomain = irq_domain_add_linear(node, plic.nr_irqs + 1,
+                                               &plic_irqdomain_ops, NULL);
+       if (WARN_ON(!plic.irqdomain))
                goto out_iounmap;
 
-       for (i = 0; i < nr_handlers; i++) {
+       for (i = 0; i < plic.nr_handlers; i++) {
                struct of_phandle_args parent;
                struct plic_handler *handler;
                irq_hw_number_t hwirq;
@@ -231,25 +237,26 @@ static int __init plic_init(struct device_node *node,
                handler = per_cpu_ptr(&plic_handlers, cpu);
                handler->present = true;
                handler->hart_base =
-                       plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+                       plic.regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
                raw_spin_lock_init(&handler->enable_lock);
                handler->enable_base =
-                       plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
+                       plic.regs + ENABLE_BASE + i * ENABLE_PER_HART;
 
                /* priority must be > threshold to trigger an interrupt */
                writel(0, handler->hart_base + CONTEXT_THRESHOLD);
-               for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+               for (hwirq = 1; hwirq <= plic.nr_irqs; hwirq++)
                        plic_toggle(handler, hwirq, 0);
-               nr_mapped++;
+
+               plic.nr_mapped++;
        }
 
        pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
-               nr_irqs, nr_mapped, nr_handlers);
+               plic.nr_irqs, plic.nr_mapped, plic.nr_handlers);
        set_handle_irq(plic_handle_irq);
        return 0;
 
 out_iounmap:
-       iounmap(plic_regs);
+       iounmap(plic.regs);
        return error;
 }
 
-- 
2.17.1
