On 06/14/2016 06:37 PM, Sebastian Frias wrote:
>>>> Also, without seeing the code,
>>>> it is pretty difficult to make any meaningful comment...
>>>
>>> Base code is either 4.7rc1 or 4.4.
>>> The irq-crossbar code is not much different from TI, but you can find it 
>>> attached.
>>
>> Please post it separately (and inline), the email client I have here
>> makes it hard to review attached patches.
> 
> Ok, I'll post it in a separate email and inline.
> 

Here it goes:


#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>


#define IRQ_MUX_INPUT_LINES  (128)
#define IRQ_MUX_OUTPUT_LINES (24)
#define IRQ_FREE             (-1)
#define IRQ_RESERVED         (-2)


/**
 * struct tangox_irq_mux - irq mux private driver data
 * @lock: spinlock serializing access to @irq_map
 * @mux_inputs:  number of inputs (irq lines entering the mux)
 * @mux_outputs: number of outputs (irq lines exiting the mux, connected to the GIC)
 * @irq_map: irq input->output map
 * @reg_base: mux base address
 */
struct tangox_irq_mux {
        raw_spinlock_t lock;
        uint mux_inputs;
        uint mux_outputs;
        uint *irq_map;
        void __iomem *reg_base;
};


#define DBGLOG(__format, ...)                                   \
        do {                                                    \
                pr_info("[%s:%d] %s(): " __format, __FILE__, __LINE__, 
__FUNCTION__ , ##__VA_ARGS__); \
        } while (0)


static inline u32 intc_readl(void __iomem *address)
{
        u32 value = readl_relaxed(address);
        //DBGLOG("read 0x%x @ %p\n", value, address);
        return value;
}

static inline void intc_writel(u32 value, void __iomem *address)
{
        //DBGLOG("write 0x%x @ %p\n", value, address);
        writel_relaxed(value, address);
}

static inline void tangox_setup_irq_route(struct tangox_irq_mux *irq_mux_context,
                                          int irq_in, int irq_out)
{
        u32 value = irq_out;
        u32 offset = (irq_in * 4);
        void __iomem *address = irq_mux_context->reg_base + offset;

        DBGLOG("route irq %2d (@ %p) => irq %2u\n", irq_in, address, value);

        if (value)
                value |= 0x80000000;

        intc_writel(value, address);
}


static int tangox_allocate_gic_irq(struct irq_domain *domain,
                                   unsigned virq,
                                   irq_hw_number_t hwirq)
{
        struct tangox_irq_mux *irq_mux_context = domain->host_data;
        struct irq_fwspec fwspec;
        int i;
        int err;

        DBGLOG("domain 0x%p, virq %d (0x%x) hwirq %d (0x%x)\n", domain, virq, 
virq, hwirq, hwirq);

        if (!irq_domain_get_of_node(domain->parent))
                return -EINVAL;

        raw_spin_lock(&(irq_mux_context->lock));
        for (i = irq_mux_context->mux_outputs - 1; i >= 0; i--) {
                if (irq_mux_context->irq_map[i] == IRQ_FREE) {
                        irq_mux_context->irq_map[i] = hwirq;
                        break;
                }
        }
        raw_spin_unlock(&(irq_mux_context->lock));

        if (i < 0)
                return -ENODEV;

        fwspec.fwnode = domain->parent->fwnode;
        fwspec.param_count = 3;
        fwspec.param[0] = 0;    /* SPI */
        fwspec.param[1] = i;
        fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;

        err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
        if (err)
                irq_mux_context->irq_map[i] = IRQ_FREE;
        else
                tangox_setup_irq_route(irq_mux_context, hwirq, i);

        return err;
}

static struct irq_chip mux_chip = {
        .name                   = "CBAR",
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_mask               = irq_chip_mask_parent,
        .irq_unmask             = irq_chip_unmask_parent,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_set_type           = irq_chip_set_type_parent,
        .flags                  = IRQCHIP_MASK_ON_SUSPEND |
                                  IRQCHIP_SKIP_SET_WAKE,
#ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
#endif
};

/**
 * tangox_irq_mux_domain_alloc - map/reserve a mux<->irq connection
 * @domain: domain of irq to map
 * @virq: virq number
 * @nr_irqs: number of irqs to reserve
 *
 */
static int tangox_irq_mux_domain_alloc(struct irq_domain *domain,
                                       unsigned int virq,
                                       unsigned int nr_irqs,
                                       void *data)
{
        struct tangox_irq_mux *irq_mux_context = domain->host_data;
        struct irq_fwspec *fwspec = data;
        irq_hw_number_t hwirq;
        int i;

        DBGLOG("domain 0x%p, virq %d (0x%x) nr_irqs %d\n", domain, virq, virq, 
nr_irqs);

        if (fwspec->param_count != 3)
                return -EINVAL; /* Not GIC compliant */
        if (fwspec->param[0] != 0)
                return -EINVAL; /* No PPI should point to this domain */

        hwirq = fwspec->param[1];
        if ((hwirq + nr_irqs) > irq_mux_context->mux_inputs)
                return -EINVAL; /* Can't deal with this */

        for (i = 0; i < nr_irqs; i++) {
                int err = tangox_allocate_gic_irq(domain, virq + i, hwirq + i);

                if (err)
                        return err;

                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
                                              &mux_chip, NULL);
        }

        return 0;
}

/**
 * tangox_irq_mux_domain_free - unmap/free a mux<->irq connection
 * @domain: domain of irq to unmap
 * @virq: virq number
 * @nr_irqs: number of irqs to free
 *
 * We do not maintain a use count of the total number of map/unmap
 * calls for a particular irq to find out if an irq can really be
 * unmapped. This is because unmap is called during irq_dispose_mapping(irq),
 * after which the irq is unusable anyway, so an explicit map has to be
 * done again after that.
 */
static void tangox_irq_mux_domain_free(struct irq_domain *domain,
                                       unsigned int virq,
                                       unsigned int nr_irqs)
{
        int i;
        struct tangox_irq_mux *irq_mux_context = domain->host_data;

        DBGLOG("domain 0x%p, virq %d (0x%x) nr_irqs %d\n", domain, virq, virq, 
nr_irqs);

        raw_spin_lock(&(irq_mux_context->lock));
        for (i = 0; i < nr_irqs; i++) {
                struct irq_data *irqdata = irq_domain_get_irq_data(domain, virq + i);
                int j;

                /* release the mux output line(s) routed to this input */
                for (j = 0; j < irq_mux_context->mux_outputs; j++) {
                        if (irq_mux_context->irq_map[j] == irqdata->hwirq)
                                irq_mux_context->irq_map[j] = IRQ_FREE;
                }
                /* route the input back to output 0, i.e. disabled */
                tangox_setup_irq_route(irq_mux_context, irqdata->hwirq, 0);
                irq_domain_reset_irq_data(irqdata);
        }
        raw_spin_unlock(&(irq_mux_context->lock));
}

static int tangox_irq_mux_domain_translate(struct irq_domain *domain,
                                           struct irq_fwspec *fwspec,
                                           unsigned long *hwirq,
                                           unsigned int *type)
{

        DBGLOG("domain 0x%p\n", domain);

        if (is_of_node(fwspec->fwnode)) {
                if (fwspec->param_count != 3)
                        return -EINVAL;

                /* No PPI should point to this domain */
                if (fwspec->param[0] != 0)
                        return -EINVAL;

                *hwirq = fwspec->param[1];
                *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
                return 0;
        }

        return -EINVAL;
}

static const struct irq_domain_ops tangox_irq_mux_domain_ops = {
        .alloc          = tangox_irq_mux_domain_alloc,
        .free           = tangox_irq_mux_domain_free,
        .translate      = tangox_irq_mux_domain_translate,
};

static int __init tangox_irq_mux_init(struct device_node *node,
                                      struct tangox_irq_mux *irq_mux_context)
{
        int i, max_in, max_out, entry_size;
        u32 entry;
        const __be32 *reserved_irq_list;
        int ret = -ENOMEM;

        DBGLOG("init\n");

        if (!irq_mux_context)
                return -ENOMEM;

        irq_mux_context->reg_base = of_iomap(node, 0);
        if (!irq_mux_context->reg_base)
                goto err_exit;

        irq_mux_context->mux_inputs  = IRQ_MUX_INPUT_LINES;
        irq_mux_context->mux_outputs = IRQ_MUX_OUTPUT_LINES;

        max_in  = irq_mux_context->mux_inputs;
        max_out = irq_mux_context->mux_outputs;

        irq_mux_context->irq_map = kcalloc(max_out, sizeof(int), GFP_KERNEL);
        if (!irq_mux_context->irq_map)
                goto err_unmap_base;

        for (i = 0; i < max_out; i++)
                irq_mux_context->irq_map[i] = IRQ_FREE;

        /* mark reserved IRQ lines */
        reserved_irq_list = of_get_property(node, "irqs-reserved", &entry_size);
        if (reserved_irq_list) {
                entry_size /= sizeof(__be32);

                DBGLOG("setting up %d reserved irqs\n", entry_size);

                for (i = 0; i < entry_size; i++) {
                        of_property_read_u32_index(node,
                                                   "irqs-reserved",
                                                   i, &entry);
                        if (entry >= max_out) {
                                pr_err("Invalid reserved entry %u >= %d: ignored\n",
                                       entry, max_out);
                                continue;
                        }
                        irq_mux_context->irq_map[entry] = IRQ_RESERVED;
                }
        }

        DBGLOG("disabling free IRQs\n");

        /* disable free IRQs during initialisation */
        for (i = 0; i < max_in; i++) {
                tangox_setup_irq_route(irq_mux_context, i, 0);
        }

        DBGLOG("init backward compatible map\n");

        tangox_setup_irq_route(irq_mux_context, 125, 2);
        tangox_setup_irq_route(irq_mux_context, 126, 3);
        tangox_setup_irq_route(irq_mux_context, 127, 4);

        raw_spin_lock_init(&irq_mux_context->lock);

        return 0;

err_free_map:
        kfree(irq_mux_context->irq_map);
err_unmap_base:
        iounmap(irq_mux_context->reg_base);
err_exit:
        return ret;
}

static int __init tangox_irq_mux_deinit(struct tangox_irq_mux *irq_mux_context)
{
        if (!irq_mux_context)
                return -ENOMEM;

        if (irq_mux_context->reg_base)
                iounmap(irq_mux_context->reg_base);

        if (irq_mux_context->irq_map)
                kfree(irq_mux_context->irq_map);

        kfree(irq_mux_context);

        return 0;
}

static int __init tangox_of_irq_mux_init(struct device_node *node,
                                         struct device_node *parent)
{
        struct tangox_irq_mux *irq_mux_context;
        struct irq_domain *parent_domain, *domain;
        int err;

        DBGLOG("irqv2 begin\n");

        if (!parent) {
                pr_err("%s: no parent, giving up\n", node->full_name);
                return -ENODEV;
        }

        parent_domain = irq_find_host(parent);
        if (!parent_domain) {
                pr_err("%s: unable to obtain parent domain\n", node->full_name);
                return -ENXIO;
        }

        irq_mux_context = kzalloc(sizeof(*irq_mux_context), GFP_KERNEL);

        err = tangox_irq_mux_init(node, irq_mux_context);
        if (err) {
                pr_err("%s: init failed (%d)\n", node->full_name, err);
                tangox_irq_mux_deinit(irq_mux_context);
                return err;
        }

        domain = irq_domain_add_hierarchy(parent_domain, 0,
                                          irq_mux_context->mux_inputs,
                                          node, &tangox_irq_mux_domain_ops,
                                          irq_mux_context);
        if (!domain) {
                pr_err("%s: failed to allocated domain\n", node->full_name);
                tangox_irq_mux_deinit(irq_mux_context);
                return -ENOMEM;
        }

        return 0;
}
IRQCHIP_DECLARE(tangox_intc, "sigma,smp-irq-mux", tangox_of_irq_mux_init);
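
For context, the DT node describing this mux would look roughly like the
following (the label, unit address, register size and reserved lines below are
illustrative only, not taken from the actual DT):

	irq_mux: interrupt-controller@6e000 {
		compatible = "sigma,smp-irq-mux";
		reg = <0x6e000 0x200>;	/* one 32-bit route register per input */
		interrupt-controller;
		#interrupt-cells = <3>;	/* GIC-style: 0 (SPI), input line, trigger type */
		interrupt-parent = <&gic>;
		/* mux outputs already wired up elsewhere, kept out of the allocation pool */
		irqs-reserved = <2 3 4>;
	};

Consumers then reference the mux input lines with the usual 3-cell GIC-style
specifier, e.g. interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>.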

