2009-01-09 00:46:40 +00:00
|
|
|
/*
|
|
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
|
|
* for more details.
|
|
|
|
*
|
2016-02-09 19:00:06 +00:00
|
|
|
* Copyright (C) 2004-2016 Cavium, Inc.
|
2009-01-09 00:46:40 +00:00
|
|
|
*/
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
#include <linux/of_address.h>
|
2009-01-09 00:46:40 +00:00
|
|
|
#include <linux/interrupt.h>
|
2012-07-05 16:12:39 +00:00
|
|
|
#include <linux/irqdomain.h>
|
2011-03-25 19:38:51 +00:00
|
|
|
#include <linux/bitops.h>
|
2015-01-15 13:11:19 +00:00
|
|
|
#include <linux/of_irq.h>
|
2011-03-25 19:38:51 +00:00
|
|
|
#include <linux/percpu.h>
|
2012-07-05 16:12:39 +00:00
|
|
|
#include <linux/slab.h>
|
2011-03-25 19:38:51 +00:00
|
|
|
#include <linux/irq.h>
|
2009-06-19 13:05:26 +00:00
|
|
|
#include <linux/smp.h>
|
2012-07-05 16:12:39 +00:00
|
|
|
#include <linux/of.h>
|
2009-01-09 00:46:40 +00:00
|
|
|
|
|
|
|
#include <asm/octeon/octeon.h>
|
2012-04-04 22:34:41 +00:00
|
|
|
#include <asm/octeon/cvmx-ciu2-defs.h>
|
2016-02-09 19:00:11 +00:00
|
|
|
#include <asm/octeon/cvmx-ciu3-defs.h>
|
2009-01-09 00:46:40 +00:00
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/* Per-CPU mirrors of the CIU0/CIU1 interrupt-enable CSRs for this core. */
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
/* Protects the en_mirror words and the matching CSR writes, per CPU. */
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);

/* CIU3 interrupt delivery table (IDT) indices used for the IP2/IP3 lines. */
static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip2);
static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip3);
/* CIU3 state of the node each CPU belongs to. */
static DEFINE_PER_CPU(struct octeon_ciu3_info *, octeon_ciu3_info);

#define CIU3_MBOX_PER_CORE 10

/*
 * The 8 most significant bits of the intsn identify the interrupt major block.
 * Each major block might use its own interrupt domain. Thus 256 domains are
 * needed.
 */
#define MAX_CIU3_DOMAINS		256

/* Maps a CIU3 intsn to the hwirq used inside one of its sub-domains. */
typedef irq_hw_number_t (*octeon_ciu3_intsn2hw_t)(struct irq_domain *, unsigned int);

/* Information for each ciu3 in the system */
struct octeon_ciu3_info {
	u64			ciu3_addr;	/* base address of the CIU3 CSRs */
	int			node;		/* NUMA node this CIU3 serves */
	struct irq_domain	*domain[MAX_CIU3_DOMAINS];
	octeon_ciu3_intsn2hw_t	intsn2hw[MAX_CIU3_DOMAINS];
};

/* Each ciu3 in the system uses its own data (one ciu3 per node) */
static struct octeon_ciu3_info	*octeon_ciu3_info_per_node[4];
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
/* Per-irq_domain data for the CIU/CIU2 domains. */
struct octeon_irq_ciu_domain_data {
	int num_sum;		/* number of sum registers (2 or 3). */
};
|
|
|
|
|
2016-02-09 19:00:11 +00:00
|
|
|
/* Register offsets from ciu3_addr */
#define CIU3_CONST		0x220
#define CIU3_IDT_CTL(_idt)	((_idt) * 8 + 0x110000)
#define CIU3_IDT_PP(_idt, _idx)	((_idt) * 32 + (_idx) * 8 + 0x120000)
#define CIU3_IDT_IO(_idt)	((_idt) * 8 + 0x130000)
#define CIU3_DEST_PP_INT(_pp_ip) ((_pp_ip) * 8 + 0x200000)
#define CIU3_DEST_IO_INT(_io)	((_io) * 8 + 0x210000)
/* Interrupt source control/clear/set (W1C = write-1-to-clear, W1S = set). */
#define CIU3_ISC_CTL(_intsn)	((_intsn) * 8 + 0x80000000)
#define CIU3_ISC_W1C(_intsn)	((_intsn) * 8 + 0x90000000)
#define CIU3_ISC_W1S(_intsn)	((_intsn) * 8 + 0xa0000000)

/* Reverse map from (line, bit) in the CIU sum registers to the Linux irq. */
static __read_mostly int octeon_irq_ciu_to_irq[8][64];
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
/* Per-irq chip data shared by the CIU, CIU2 and CIU3 irq_chips. */
struct octeon_ciu_chip_data {
	union {
		struct {		/* only used for ciu3 */
			u64 ciu3_addr;
			unsigned int intsn;
		};
		struct {		/* only used for ciu/ciu2 */
			u8 line;
			u8 bit;
		};
	};
	int gpio_line;
	int current_cpu;	/* Next CPU expected to take this irq */
	int ciu_node; /* NUMA node number of the CIU */
};
|
|
|
|
|
|
|
|
/* State for one of the eight MIPS core (CP0) interrupt lines. */
struct octeon_core_chip_data {
	struct mutex core_irq_mutex;	/* serialises enable/disable updates */
	bool current_en;	/* state currently programmed on all CPUs */
	bool desired_en;	/* state requested while the bus lock is held */
	u8 bit;			/* bit position within the CP0 Status IM field */
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
/*
 * Allocate chip data for @irq and bind @chip/@handler to it, recording
 * the interrupt's (line, bit) position in the CIU sum/enable registers
 * and updating the reverse map octeon_irq_ciu_to_irq.
 *
 * Returns 0 on success or -ENOMEM if the chip data cannot be allocated.
 */
static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				      struct irq_chip *chip,
				      irq_flow_handler_t handler)
{
	struct octeon_ciu_chip_data *cd;

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	irq_set_chip_and_handler(irq, chip, handler);

	cd->line = line;
	cd->bit = bit;
	cd->gpio_line = gpio_line;

	irq_set_chip_data(irq, cd);
	octeon_irq_ciu_to_irq[line][bit] = irq;
	return 0;
}
|
|
|
|
|
|
|
|
static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
|
|
|
|
{
|
|
|
|
struct irq_data *data = irq_get_irq_data(irq);
|
|
|
|
struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
|
|
|
|
|
|
|
|
irq_set_chip_data(irq, NULL);
|
|
|
|
kfree(cd);
|
2011-03-25 19:38:51 +00:00
|
|
|
}
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
|
|
|
|
int irq, int line, int bit)
|
2012-08-10 23:00:31 +00:00
|
|
|
{
|
2015-01-15 13:11:19 +00:00
|
|
|
return irq_domain_associate(domain, irq, line << 6 | bit);
|
2012-08-10 23:00:31 +00:00
|
|
|
}
|
|
|
|
|
2009-10-13 18:26:03 +00:00
|
|
|
/*
 * Translate a Linux CPU number into the Octeon hardware core id.  On
 * !SMP kernels only the boot core runs Linux, so the current core
 * number is the answer.
 */
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/*
 * Inverse of octeon_coreid_for_cpu(): translate a hardware core id
 * into a Linux CPU number.  On !SMP kernels the only CPU is the
 * current one.
 */
static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}
|
|
|
|
|
|
|
|
/*
 * Ack a core (CP0) interrupt line: mask it in the Status register and,
 * for the two software interrupt lines (bits 0 and 1), also clear the
 * pending bit in Cause, which hardware does not clear by itself.
 */
static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/*
 * End-of-interrupt for a core irq line: re-enable its IM bit in the
 * CP0 Status register (undoes the mask done in octeon_irq_core_ack).
 */
static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/*
 * SMP cross-call helper: apply the desired enable state of a core irq
 * line to the CP0 Status register of the CPU this runs on.  Invoked on
 * every CPU from octeon_irq_core_bus_sync_unlock() because Status is a
 * per-core register.
 */
static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);

}
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/*
 * Record that this core irq line should be masked; the state is
 * applied to all CPUs at irq_bus_sync_unlock time.
 */
static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = false;
}
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/*
 * Record that this core irq line should be enabled; the state is
 * applied to all CPUs at irq_bus_sync_unlock time.
 */
static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = true;
}
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/*
 * Slow-bus lock: serialise enable/disable state changes for this core
 * irq line.  The mutex is released (and the state broadcast) in
 * octeon_irq_core_bus_sync_unlock().
 */
static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}
|
2009-01-09 00:46:40 +00:00
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/*
 * Slow-bus unlock: if the desired enable state changed while the lock
 * was held, broadcast it to every online CPU (CP0 Status is per-core),
 * then drop the mutex taken in octeon_irq_core_bus_lock().
 */
static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/*
 * irq_chip for the eight MIPS core (CP0) interrupt lines.  Enable and
 * disable only record the desired state; the actual per-CPU Status
 * register updates happen under the slow-bus lock so they may sleep.
 * On CPU hotplug the line is re-enabled (eoi) or masked (ack) locally.
 */
static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
|
|
|
|
|
|
|
|
/*
 * Initialise the eight MIPS core interrupt lines: set up each line's
 * chip data (initially disabled) and register octeon_irq_chip_core as
 * a per-cpu irq starting at OCTEON_IRQ_SW0.
 */
static void __init octeon_irq_init_core(void)
{
	int line;

	for (line = 0; line < MIPS_CORE_IRQ_LINES; line++) {
		struct octeon_core_chip_data *cd = &octeon_irq_core_chip_data[line];
		int irq = OCTEON_IRQ_SW0 + line;

		mutex_init(&cd->core_irq_mutex);
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = line;

		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}
|
|
|
|
|
|
|
|
/*
 * Round-robin CPU selection for a CIU interrupt.
 *
 * With more than one CPU in the affinity mask, pick the next online
 * CPU in the mask after the one that last took this irq
 * (cd->current_cpu), wrapping around when cpumask_next() runs off the
 * end.  A single-CPU mask selects that CPU; an empty mask falls back
 * to the current CPU.  On !SMP kernels this is always the current CPU.
 */
static int next_cpu_for_irq(struct irq_data *data)
{

#ifdef CONFIG_SMP
	int cpu;
	struct cpumask *mask = irq_data_get_affinity_mask(data);
	int weight = cpumask_weight(mask);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (weight > 1) {
		cpu = cd->current_cpu;
		for (;;) {
			cpu = cpumask_next(cpu, mask);
			if (cpu >= nr_cpu_ids) {
				/* Ran past the last CPU: restart the scan. */
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(mask);
	} else {
		cpu = smp_processor_id();
	}
	cd->current_cpu = cpu;
	return cpu;
#else
	return smp_processor_id();
#endif
}
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
static void octeon_irq_ciu_enable(struct irq_data *data)
|
2010-07-23 17:43:46 +00:00
|
|
|
{
|
2011-03-25 19:38:51 +00:00
|
|
|
int cpu = next_cpu_for_irq(data);
|
|
|
|
int coreid = octeon_coreid_for_cpu(cpu);
|
|
|
|
unsigned long *pen;
|
2010-07-23 17:43:46 +00:00
|
|
|
unsigned long flags;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
2010-07-23 17:43:46 +00:00
|
|
|
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spin_lock_irqsave(lock, flags);
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0) {
|
2011-03-25 19:38:51 +00:00
|
|
|
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
|
2015-01-15 13:11:19 +00:00
|
|
|
__set_bit(cd->bit, pen);
|
2012-04-05 17:24:25 +00:00
|
|
|
/*
|
|
|
|
* Must be visible to octeon_irq_ip{2,3}_ciu() before
|
|
|
|
* enabling the irq.
|
|
|
|
*/
|
|
|
|
wmb();
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
|
|
|
|
} else {
|
|
|
|
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
|
2015-01-15 13:11:19 +00:00
|
|
|
__set_bit(cd->bit, pen);
|
2012-04-05 17:24:25 +00:00
|
|
|
/*
|
|
|
|
* Must be visible to octeon_irq_ip{2,3}_ciu() before
|
|
|
|
* enabling the irq.
|
|
|
|
*/
|
|
|
|
wmb();
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
|
|
|
|
}
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spin_unlock_irqrestore(lock, flags);
|
2010-07-23 17:43:46 +00:00
|
|
|
}
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
static void octeon_irq_ciu_enable_local(struct irq_data *data)
|
|
|
|
{
|
|
|
|
unsigned long *pen;
|
|
|
|
unsigned long flags;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
mips: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.
Other use cases are for storing and retrieving data from the current
processors percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.
__get_cpu_var() is defined as :
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.
this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.
This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and less registers
are used when code is generated.
At the end of the patch set all uses of __get_cpu_var have been removed so
the macro is removed too.
The patch set includes passes over all arches as well. Once these operations
are used throughout then specialized macros can be defined in non -x86
arches as well in order to optimize per cpu access by f.e. using a global
register that may be set to the per cpu base.
Transformations done to __get_cpu_var()
1. Determine the address of the percpu instance of the current processor.
DEFINE_PER_CPU(int, y);
int *x = &__get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(&y);
2. Same as #1 but this time an array structure is involved.
DEFINE_PER_CPU(int, y[20]);
int *x = __get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(y);
3. Retrieve the content of the current processors instance of a per cpu
variable.
DEFINE_PER_CPU(int, y);
int x = __get_cpu_var(y)
Converts to
int x = __this_cpu_read(y);
4. Retrieve the content of a percpu struct
DEFINE_PER_CPU(struct mystruct, y);
struct mystruct x = __get_cpu_var(y);
Converts to
memcpy(&x, this_cpu_ptr(&y), sizeof(x));
5. Assignment to a per cpu variable
DEFINE_PER_CPU(int, y)
__get_cpu_var(y) = x;
Converts to
__this_cpu_write(y, x);
6. Increment/Decrement etc of a per cpu variable
DEFINE_PER_CPU(int, y);
__get_cpu_var(y)++
Converts to
__this_cpu_inc(y)
Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
2014-08-17 17:30:44 +00:00
|
|
|
raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spin_lock_irqsave(lock, flags);
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0) {
|
mips: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.
Other use cases are for storing and retrieving data from the current
processors percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.
__get_cpu_var() is defined as :
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.
this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.
This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and less registers
are used when code is generated.
At the end of the patch set all uses of __get_cpu_var have been removed so
the macro is removed too.
The patch set includes passes over all arches as well. Once these operations
are used throughout then specialized macros can be defined in non -x86
arches as well in order to optimize per cpu access by f.e. using a global
register that may be set to the per cpu base.
Transformations done to __get_cpu_var()
1. Determine the address of the percpu instance of the current processor.
DEFINE_PER_CPU(int, y);
int *x = &__get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(&y);
2. Same as #1 but this time an array structure is involved.
DEFINE_PER_CPU(int, y[20]);
int *x = __get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(y);
3. Retrieve the content of the current processors instance of a per cpu
variable.
DEFINE_PER_CPU(int, y);
int x = __get_cpu_var(y)
Converts to
int x = __this_cpu_read(y);
4. Retrieve the content of a percpu struct
DEFINE_PER_CPU(struct mystruct, y);
struct mystruct x = __get_cpu_var(y);
Converts to
memcpy(&x, this_cpu_ptr(&y), sizeof(x));
5. Assignment to a per cpu variable
DEFINE_PER_CPU(int, y)
__get_cpu_var(y) = x;
Converts to
__this_cpu_write(y, x);
6. Increment/Decrement etc of a per cpu variable
DEFINE_PER_CPU(int, y);
__get_cpu_var(y)++
Converts to
__this_cpu_inc(y)
Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
2014-08-17 17:30:44 +00:00
|
|
|
pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
|
2015-01-15 13:11:19 +00:00
|
|
|
__set_bit(cd->bit, pen);
|
2012-04-05 17:24:25 +00:00
|
|
|
/*
|
|
|
|
* Must be visible to octeon_irq_ip{2,3}_ciu() before
|
|
|
|
* enabling the irq.
|
|
|
|
*/
|
|
|
|
wmb();
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
|
|
|
|
} else {
|
mips: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.
Other use cases are for storing and retrieving data from the current
processors percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.
__get_cpu_var() is defined as :
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.
this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.
This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and less registers
are used when code is generated.
At the end of the patch set all uses of __get_cpu_var have been removed so
the macro is removed too.
The patch set includes passes over all arches as well. Once these operations
are used throughout then specialized macros can be defined in non -x86
arches as well in order to optimize per cpu access by f.e. using a global
register that may be set to the per cpu base.
Transformations done to __get_cpu_var()
1. Determine the address of the percpu instance of the current processor.
DEFINE_PER_CPU(int, y);
int *x = &__get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(&y);
2. Same as #1 but this time an array structure is involved.
DEFINE_PER_CPU(int, y[20]);
int *x = __get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(y);
3. Retrieve the content of the current processors instance of a per cpu
variable.
DEFINE_PER_CPU(int, y);
int x = __get_cpu_var(y)
Converts to
int x = __this_cpu_read(y);
4. Retrieve the content of a percpu struct
DEFINE_PER_CPU(struct mystruct, y);
struct mystruct x = __get_cpu_var(y);
Converts to
memcpy(&x, this_cpu_ptr(&y), sizeof(x));
5. Assignment to a per cpu variable
DEFINE_PER_CPU(int, y)
__get_cpu_var(y) = x;
Converts to
__this_cpu_write(y, x);
6. Increment/Decrement etc of a per cpu variable
DEFINE_PER_CPU(int, y);
__get_cpu_var(y)++
Converts to
__this_cpu_inc(y)
Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
2014-08-17 17:30:44 +00:00
|
|
|
pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
|
2015-01-15 13:11:19 +00:00
|
|
|
__set_bit(cd->bit, pen);
|
2012-04-05 17:24:25 +00:00
|
|
|
/*
|
|
|
|
* Must be visible to octeon_irq_ip{2,3}_ciu() before
|
|
|
|
* enabling the irq.
|
|
|
|
*/
|
|
|
|
wmb();
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
|
|
|
|
}
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spin_unlock_irqrestore(lock, flags);
|
2011-03-25 19:38:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu_disable_local(struct irq_data *data)
|
|
|
|
{
|
|
|
|
unsigned long *pen;
|
|
|
|
unsigned long flags;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
mips: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.
Other use cases are for storing and retrieving data from the current
processors percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.
__get_cpu_var() is defined as :
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.
this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.
This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and less registers
are used when code is generated.
At the end of the patch set all uses of __get_cpu_var have been removed so
the macro is removed too.
The patch set includes passes over all arches as well. Once these operations
are used throughout then specialized macros can be defined in non -x86
arches as well in order to optimize per cpu access by f.e. using a global
register that may be set to the per cpu base.
Transformations done to __get_cpu_var()
1. Determine the address of the percpu instance of the current processor.
DEFINE_PER_CPU(int, y);
int *x = &__get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(&y);
2. Same as #1 but this time an array structure is involved.
DEFINE_PER_CPU(int, y[20]);
int *x = __get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(y);
3. Retrieve the content of the current processors instance of a per cpu
variable.
DEFINE_PER_CPU(int, y);
int x = __get_cpu_var(y)
Converts to
int x = __this_cpu_read(y);
4. Retrieve the content of a percpu struct
DEFINE_PER_CPU(struct mystruct, y);
struct mystruct x = __get_cpu_var(y);
Converts to
memcpy(&x, this_cpu_ptr(&y), sizeof(x));
5. Assignment to a per cpu variable
DEFINE_PER_CPU(int, y)
__get_cpu_var(y) = x;
Converts to
__this_cpu_write(y, x);
6. Increment/Decrement etc of a per cpu variable
DEFINE_PER_CPU(int, y);
__get_cpu_var(y)++
Converts to
__this_cpu_inc(y)
Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
2014-08-17 17:30:44 +00:00
|
|
|
raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spin_lock_irqsave(lock, flags);
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0) {
|
mips: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.
Other use cases are for storing and retrieving data from the current
processors percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.
__get_cpu_var() is defined as :
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.
this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.
This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and less registers
are used when code is generated.
At the end of the patch set all uses of __get_cpu_var have been removed so
the macro is removed too.
The patch set includes passes over all arches as well. Once these operations
are used throughout then specialized macros can be defined in non -x86
arches as well in order to optimize per cpu access by f.e. using a global
register that may be set to the per cpu base.
Transformations done to __get_cpu_var()
1. Determine the address of the percpu instance of the current processor.
DEFINE_PER_CPU(int, y);
int *x = &__get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(&y);
2. Same as #1 but this time an array structure is involved.
DEFINE_PER_CPU(int, y[20]);
int *x = __get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(y);
3. Retrieve the content of the current processors instance of a per cpu
variable.
DEFINE_PER_CPU(int, y);
int x = __get_cpu_var(y)
Converts to
int x = __this_cpu_read(y);
4. Retrieve the content of a percpu struct
DEFINE_PER_CPU(struct mystruct, y);
struct mystruct x = __get_cpu_var(y);
Converts to
memcpy(&x, this_cpu_ptr(&y), sizeof(x));
5. Assignment to a per cpu variable
DEFINE_PER_CPU(int, y)
__get_cpu_var(y) = x;
Converts to
__this_cpu_write(y, x);
6. Increment/Decrement etc of a per cpu variable
DEFINE_PER_CPU(int, y);
__get_cpu_var(y)++
Converts to
__this_cpu_inc(y)
Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
2014-08-17 17:30:44 +00:00
|
|
|
pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
|
2015-01-15 13:11:19 +00:00
|
|
|
__clear_bit(cd->bit, pen);
|
2012-04-05 17:24:25 +00:00
|
|
|
/*
|
|
|
|
* Must be visible to octeon_irq_ip{2,3}_ciu() before
|
|
|
|
* enabling the irq.
|
|
|
|
*/
|
|
|
|
wmb();
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
|
|
|
|
} else {
|
mips: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.
Other use cases are for storing and retrieving data from the current
processors percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.
__get_cpu_var() is defined as :
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.
this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.
This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and less registers
are used when code is generated.
At the end of the patch set all uses of __get_cpu_var have been removed so
the macro is removed too.
The patch set includes passes over all arches as well. Once these operations
are used throughout then specialized macros can be defined in non -x86
arches as well in order to optimize per cpu access by f.e. using a global
register that may be set to the per cpu base.
Transformations done to __get_cpu_var()
1. Determine the address of the percpu instance of the current processor.
DEFINE_PER_CPU(int, y);
int *x = &__get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(&y);
2. Same as #1 but this time an array structure is involved.
DEFINE_PER_CPU(int, y[20]);
int *x = __get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(y);
3. Retrieve the content of the current processors instance of a per cpu
variable.
DEFINE_PER_CPU(int, y);
int x = __get_cpu_var(y)
Converts to
int x = __this_cpu_read(y);
4. Retrieve the content of a percpu struct
DEFINE_PER_CPU(struct mystruct, y);
struct mystruct x = __get_cpu_var(y);
Converts to
memcpy(&x, this_cpu_ptr(&y), sizeof(x));
5. Assignment to a per cpu variable
DEFINE_PER_CPU(int, y)
__get_cpu_var(y) = x;
Converts to
__this_cpu_write(y, x);
6. Increment/Decrement etc of a per cpu variable
DEFINE_PER_CPU(int, y);
__get_cpu_var(y)++
Converts to
__this_cpu_inc(y)
Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
2014-08-17 17:30:44 +00:00
|
|
|
pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
|
2015-01-15 13:11:19 +00:00
|
|
|
__clear_bit(cd->bit, pen);
|
2012-04-05 17:24:25 +00:00
|
|
|
/*
|
|
|
|
* Must be visible to octeon_irq_ip{2,3}_ciu() before
|
|
|
|
* enabling the irq.
|
|
|
|
*/
|
|
|
|
wmb();
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
|
|
|
|
}
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spin_unlock_irqrestore(lock, flags);
|
2011-03-25 19:38:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu_disable_all(struct irq_data *data)
|
2009-01-09 00:46:40 +00:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
2011-03-25 19:38:51 +00:00
|
|
|
unsigned long *pen;
|
|
|
|
int cpu;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spinlock_t *lock;
|
2009-01-09 00:46:40 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2012-04-05 17:24:25 +00:00
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
int coreid = octeon_coreid_for_cpu(cpu);
|
|
|
|
lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0)
|
2011-03-25 19:38:51 +00:00
|
|
|
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
|
2012-04-05 17:24:25 +00:00
|
|
|
else
|
2011-03-25 19:38:51 +00:00
|
|
|
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
|
2012-04-05 17:24:25 +00:00
|
|
|
|
|
|
|
raw_spin_lock_irqsave(lock, flags);
|
2015-01-15 13:11:19 +00:00
|
|
|
__clear_bit(cd->bit, pen);
|
2012-04-05 17:24:25 +00:00
|
|
|
/*
|
|
|
|
* Must be visible to octeon_irq_ip{2,3}_ciu() before
|
|
|
|
* enabling the irq.
|
|
|
|
*/
|
|
|
|
wmb();
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0)
|
2012-04-05 17:24:25 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
|
|
|
|
else
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spin_unlock_irqrestore(lock, flags);
|
2011-03-25 19:38:51 +00:00
|
|
|
}
|
2009-01-09 00:46:40 +00:00
|
|
|
}
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
static void octeon_irq_ciu_enable_all(struct irq_data *data)
|
2009-01-09 00:46:40 +00:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
2011-03-25 19:38:51 +00:00
|
|
|
unsigned long *pen;
|
2009-01-09 00:46:40 +00:00
|
|
|
int cpu;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spinlock_t *lock;
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2012-04-05 17:24:25 +00:00
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
int coreid = octeon_coreid_for_cpu(cpu);
|
|
|
|
lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0)
|
2011-03-25 19:38:51 +00:00
|
|
|
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
|
2012-04-05 17:24:25 +00:00
|
|
|
else
|
2011-03-25 19:38:51 +00:00
|
|
|
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
|
2012-04-05 17:24:25 +00:00
|
|
|
|
|
|
|
raw_spin_lock_irqsave(lock, flags);
|
2015-01-15 13:11:19 +00:00
|
|
|
__set_bit(cd->bit, pen);
|
2012-04-05 17:24:25 +00:00
|
|
|
/*
|
|
|
|
* Must be visible to octeon_irq_ip{2,3}_ciu() before
|
|
|
|
* enabling the irq.
|
|
|
|
*/
|
|
|
|
wmb();
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0)
|
2012-04-05 17:24:25 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
|
|
|
|
else
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spin_unlock_irqrestore(lock, flags);
|
2009-01-09 00:46:40 +00:00
|
|
|
}
|
2009-10-13 18:26:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2010-07-23 17:43:46 +00:00
|
|
|
* Enable the irq on the next core in the affinity set for chips that
|
|
|
|
* have the EN*_W1{S,C} registers.
|
2009-10-13 18:26:03 +00:00
|
|
|
*/
|
2011-03-25 19:38:51 +00:00
|
|
|
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
|
2009-10-13 18:26:03 +00:00
|
|
|
{
|
2011-03-25 19:38:51 +00:00
|
|
|
u64 mask;
|
|
|
|
int cpu = next_cpu_for_irq(data);
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2009-10-13 18:26:03 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
2011-03-25 19:38:51 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Called under the desc lock, so these should never get out
|
|
|
|
* of sync.
|
|
|
|
*/
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0) {
|
2011-03-25 19:38:51 +00:00
|
|
|
int index = octeon_coreid_for_cpu(cpu) * 2;
|
2015-01-15 13:11:19 +00:00
|
|
|
set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
|
2010-07-23 17:43:46 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
|
2011-03-25 19:38:51 +00:00
|
|
|
} else {
|
|
|
|
int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
|
2015-01-15 13:11:19 +00:00
|
|
|
set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
|
2010-07-23 17:43:46 +00:00
|
|
|
}
|
2009-10-13 18:26:03 +00:00
|
|
|
}
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
/*
|
|
|
|
* Enable the irq in the sum2 registers.
|
|
|
|
*/
|
|
|
|
static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
|
|
|
|
{
|
|
|
|
u64 mask;
|
|
|
|
int cpu = next_cpu_for_irq(data);
|
|
|
|
int index = octeon_coreid_for_cpu(cpu);
|
|
|
|
struct octeon_ciu_chip_data *cd;
|
|
|
|
|
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
|
|
|
|
|
|
|
cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Disable the irq in the sum2 registers.
|
|
|
|
*/
|
|
|
|
static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
|
|
|
|
{
|
|
|
|
u64 mask;
|
|
|
|
int cpu = next_cpu_for_irq(data);
|
|
|
|
int index = octeon_coreid_for_cpu(cpu);
|
|
|
|
struct octeon_ciu_chip_data *cd;
|
|
|
|
|
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
|
|
|
|
|
|
|
cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
|
|
|
|
{
|
|
|
|
u64 mask;
|
|
|
|
int cpu = next_cpu_for_irq(data);
|
|
|
|
int index = octeon_coreid_for_cpu(cpu);
|
|
|
|
struct octeon_ciu_chip_data *cd;
|
|
|
|
|
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
|
|
|
|
|
|
|
cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
struct octeon_ciu_chip_data *cd;
|
|
|
|
u64 mask;
|
|
|
|
|
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
|
|
|
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
int coreid = octeon_coreid_for_cpu(cpu);
|
|
|
|
|
|
|
|
cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-10-13 18:26:03 +00:00
|
|
|
/*
|
2010-07-23 17:43:46 +00:00
|
|
|
* Enable the irq on the current CPU for chips that
|
|
|
|
* have the EN*_W1{S,C} registers.
|
2009-10-13 18:26:03 +00:00
|
|
|
*/
|
2011-03-25 19:38:51 +00:00
|
|
|
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
|
2009-10-13 18:26:03 +00:00
|
|
|
{
|
2011-03-25 19:38:51 +00:00
|
|
|
u64 mask;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
2009-10-13 18:26:03 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0) {
|
2011-03-25 19:38:51 +00:00
|
|
|
int index = cvmx_get_core_num() * 2;
|
2015-01-15 13:11:19 +00:00
|
|
|
set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
|
|
|
|
} else {
|
|
|
|
int index = cvmx_get_core_num() * 2 + 1;
|
2015-01-15 13:11:19 +00:00
|
|
|
set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
|
|
|
|
{
|
|
|
|
u64 mask;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0) {
|
2011-03-25 19:38:51 +00:00
|
|
|
int index = cvmx_get_core_num() * 2;
|
2015-01-15 13:11:19 +00:00
|
|
|
clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
|
|
|
|
} else {
|
|
|
|
int index = cvmx_get_core_num() * 2 + 1;
|
2015-01-15 13:11:19 +00:00
|
|
|
clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
|
|
|
|
}
|
2009-10-13 18:26:03 +00:00
|
|
|
}
|
|
|
|
|
2010-02-15 20:13:18 +00:00
|
|
|
/*
|
2011-03-25 19:38:51 +00:00
|
|
|
* Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
|
2010-02-15 20:13:18 +00:00
|
|
|
*/
|
2011-03-25 19:38:51 +00:00
|
|
|
static void octeon_irq_ciu_ack(struct irq_data *data)
|
|
|
|
{
|
|
|
|
u64 mask;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0) {
|
2011-03-25 19:38:51 +00:00
|
|
|
int index = cvmx_get_core_num() * 2;
|
2010-07-23 17:43:46 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
|
2011-03-25 19:38:51 +00:00
|
|
|
} else {
|
|
|
|
cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
|
2010-07-23 17:43:46 +00:00
|
|
|
}
|
2010-02-15 20:13:18 +00:00
|
|
|
}
|
|
|
|
|
2010-01-07 19:05:00 +00:00
|
|
|
/*
|
2011-03-25 19:38:51 +00:00
|
|
|
* Disable the irq on the all cores for chips that have the EN*_W1{S,C}
|
2010-01-07 19:05:00 +00:00
|
|
|
* registers.
|
|
|
|
*/
|
2011-03-25 19:38:51 +00:00
|
|
|
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
|
2010-01-07 19:05:00 +00:00
|
|
|
{
|
2011-03-25 19:38:51 +00:00
|
|
|
int cpu;
|
|
|
|
u64 mask;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2010-01-07 19:05:00 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0) {
|
2011-03-25 19:38:51 +00:00
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
int index = octeon_coreid_for_cpu(cpu) * 2;
|
2015-01-15 13:11:19 +00:00
|
|
|
clear_bit(cd->bit,
|
|
|
|
&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
|
2015-01-15 13:11:19 +00:00
|
|
|
clear_bit(cd->bit,
|
|
|
|
&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
|
|
|
|
}
|
|
|
|
}
|
2010-01-07 19:05:00 +00:00
|
|
|
}
|
|
|
|
|
2009-10-13 18:26:03 +00:00
|
|
|
/*
|
2011-03-25 19:38:51 +00:00
|
|
|
* Enable the irq on the all cores for chips that have the EN*_W1{S,C}
|
2009-10-13 18:26:03 +00:00
|
|
|
* registers.
|
|
|
|
*/
|
2011-03-25 19:38:51 +00:00
|
|
|
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
|
2009-10-13 18:26:03 +00:00
|
|
|
{
|
|
|
|
int cpu;
|
2011-03-25 19:38:51 +00:00
|
|
|
u64 mask;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0) {
|
2011-03-25 19:38:51 +00:00
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
int index = octeon_coreid_for_cpu(cpu) * 2;
|
2015-01-15 13:11:19 +00:00
|
|
|
set_bit(cd->bit,
|
|
|
|
&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
|
2015-01-15 13:11:19 +00:00
|
|
|
set_bit(cd->bit,
|
|
|
|
&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
|
|
|
|
}
|
2009-10-13 18:26:03 +00:00
|
|
|
}
|
2009-01-09 00:46:40 +00:00
|
|
|
}
|
|
|
|
|
2016-02-09 19:00:11 +00:00
|
|
|
static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t)
|
|
|
|
{
|
|
|
|
irqd_set_trigger_type(data, t);
|
|
|
|
|
|
|
|
if (t & IRQ_TYPE_EDGE_BOTH)
|
|
|
|
irq_set_handler_locked(data, handle_edge_irq);
|
|
|
|
else
|
|
|
|
irq_set_handler_locked(data, handle_level_irq);
|
|
|
|
|
|
|
|
return IRQ_SET_MASK_OK;
|
|
|
|
}
|
|
|
|
|
2012-07-05 16:12:37 +00:00
|
|
|
static void octeon_irq_gpio_setup(struct irq_data *data)
|
|
|
|
{
|
|
|
|
union cvmx_gpio_bit_cfgx cfg;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-07-05 16:12:37 +00:00
|
|
|
u32 t = irqd_get_trigger_type(data);
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
2012-07-05 16:12:37 +00:00
|
|
|
|
|
|
|
cfg.u64 = 0;
|
|
|
|
cfg.s.int_en = 1;
|
|
|
|
cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
|
|
|
|
cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;
|
|
|
|
|
|
|
|
/* 140 nS glitch filter*/
|
|
|
|
cfg.s.fil_cnt = 7;
|
|
|
|
cfg.s.fil_sel = 3;
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
|
2012-07-05 16:12:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Program the GPIO pin, then enable it through the lockless (v2) path. */
static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}

/* Program the GPIO pin, then enable it through the spinlocked path. */
static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}
|
|
|
|
|
|
|
|
static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
|
|
|
|
{
|
|
|
|
irqd_set_trigger_type(data, t);
|
|
|
|
octeon_irq_gpio_setup(data);
|
|
|
|
|
2016-02-09 19:00:13 +00:00
|
|
|
if (t & IRQ_TYPE_EDGE_BOTH)
|
2015-07-13 20:46:07 +00:00
|
|
|
irq_set_handler_locked(data, handle_edge_irq);
|
|
|
|
else
|
|
|
|
irq_set_handler_locked(data, handle_level_irq);
|
|
|
|
|
2012-07-05 16:12:37 +00:00
|
|
|
return IRQ_SET_MASK_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
|
|
|
|
{
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-07-05 16:12:37 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
|
2012-07-05 16:12:37 +00:00
|
|
|
|
|
|
|
octeon_irq_ciu_disable_all_v2(data);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
|
|
|
|
{
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-07-05 16:12:37 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
|
2012-07-05 16:12:37 +00:00
|
|
|
|
|
|
|
octeon_irq_ciu_disable_all(data);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
|
|
|
|
{
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-07-05 16:12:37 +00:00
|
|
|
u64 mask;
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->gpio_line);
|
2012-07-05 16:12:37 +00:00
|
|
|
|
|
|
|
cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
|
|
|
|
}
|
|
|
|
|
2009-01-09 00:46:40 +00:00
|
|
|
#ifdef CONFIG_SMP
|
2011-03-25 19:38:51 +00:00
|
|
|
|
|
|
|
static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
|
|
|
|
{
|
|
|
|
int cpu = smp_processor_id();
|
|
|
|
cpumask_t new_affinity;
|
2015-07-13 20:45:59 +00:00
|
|
|
struct cpumask *mask = irq_data_get_affinity_mask(data);
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-07-13 20:45:59 +00:00
|
|
|
if (!cpumask_test_cpu(cpu, mask))
|
2011-03-25 19:38:51 +00:00
|
|
|
return;
|
|
|
|
|
2015-07-13 20:45:59 +00:00
|
|
|
if (cpumask_weight(mask) > 1) {
|
2011-03-25 19:38:51 +00:00
|
|
|
/*
|
|
|
|
* It has multi CPU affinity, just remove this CPU
|
|
|
|
* from the affinity set.
|
|
|
|
*/
|
2015-07-13 20:45:59 +00:00
|
|
|
cpumask_copy(&new_affinity, mask);
|
2011-03-25 19:38:51 +00:00
|
|
|
cpumask_clear_cpu(cpu, &new_affinity);
|
|
|
|
} else {
|
|
|
|
/* Otherwise, put it on lowest numbered online CPU. */
|
|
|
|
cpumask_clear(&new_affinity);
|
|
|
|
cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
|
|
|
|
}
|
2014-04-16 14:36:44 +00:00
|
|
|
irq_set_affinity_locked(data, &new_affinity, false);
|
2011-03-25 19:38:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	int cpu;

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	/* Nothing to reprogram while the irq is disabled or masked. */
	if (!enable_one)
		return 0;

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		unsigned long *pen;

		raw_spin_lock_irqsave(lock, flags);

		pen = (cd->line == 0) ?
			&per_cpu(octeon_irq_ciu0_en_mirror, cpu) :
			&per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			/* Only the first matching CPU gets the source. */
			enable_one = false;
			__set_bit(cd->bit, pen);
		} else {
			__clear_bit(cd->bit, pen);
		}

		/*
		 * The mirror update must be visible to
		 * octeon_irq_ip{2,3}_ciu() before enabling the irq.
		 */
		wmb();

		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);

		raw_spin_unlock_irqrestore(lock, flags);
	}
	return 0;
}
|
2009-10-13 18:26:03 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Set affinity for the irq for chips that have the EN*_W1{S,C}
|
|
|
|
* registers.
|
|
|
|
*/
|
2011-03-25 19:38:51 +00:00
|
|
|
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
|
|
|
|
const struct cpumask *dest,
|
|
|
|
bool force)
|
2009-10-13 18:26:03 +00:00
|
|
|
{
|
|
|
|
int cpu;
|
2011-03-27 14:04:30 +00:00
|
|
|
bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
|
2011-03-25 19:38:51 +00:00
|
|
|
u64 mask;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2011-03-27 14:04:30 +00:00
|
|
|
if (!enable_one)
|
2011-03-25 19:38:51 +00:00
|
|
|
return 0;
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << cd->bit;
|
2011-03-25 19:38:51 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
if (cd->line == 0) {
|
2011-03-25 19:38:51 +00:00
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
|
|
|
|
int index = octeon_coreid_for_cpu(cpu) * 2;
|
|
|
|
if (cpumask_test_cpu(cpu, dest) && enable_one) {
|
2011-03-27 14:04:30 +00:00
|
|
|
enable_one = false;
|
2015-01-15 13:11:19 +00:00
|
|
|
set_bit(cd->bit, pen);
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
|
|
|
|
} else {
|
2015-01-15 13:11:19 +00:00
|
|
|
clear_bit(cd->bit, pen);
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
|
|
|
|
int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
|
|
|
|
if (cpumask_test_cpu(cpu, dest) && enable_one) {
|
2011-03-27 14:04:30 +00:00
|
|
|
enable_one = false;
|
2015-01-15 13:11:19 +00:00
|
|
|
set_bit(cd->bit, pen);
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
|
|
|
|
} else {
|
2015-01-15 13:11:19 +00:00
|
|
|
clear_bit(cd->bit, pen);
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
|
|
|
|
}
|
2010-07-23 17:43:46 +00:00
|
|
|
}
|
2009-10-13 18:26:03 +00:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2015-01-15 13:11:19 +00:00
|
|
|
|
|
|
|
static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
|
|
|
|
const struct cpumask *dest,
|
|
|
|
bool force)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
|
|
|
|
u64 mask;
|
|
|
|
struct octeon_ciu_chip_data *cd;
|
|
|
|
|
|
|
|
if (!enable_one)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << cd->bit;
|
|
|
|
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
int index = octeon_coreid_for_cpu(cpu);
|
|
|
|
|
|
|
|
if (cpumask_test_cpu(cpu, dest) && enable_one) {
|
|
|
|
enable_one = false;
|
|
|
|
cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
|
|
|
|
} else {
|
|
|
|
cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2009-01-09 00:46:40 +00:00
|
|
|
#endif
|
|
|
|
|
2016-02-09 19:00:11 +00:00
|
|
|
static unsigned int edge_startup(struct irq_data *data)
|
|
|
|
{
|
|
|
|
/* ack any pending edge-irq at startup, so there is
|
|
|
|
* an _edge_ to fire on when the event reappears.
|
|
|
|
*/
|
|
|
|
data->chip->irq_ack(data);
|
|
|
|
data->chip->irq_enable(data);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-10-13 18:26:03 +00:00
|
|
|
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* Same as octeon_irq_chip_ciu_v2, plus .irq_ack for edge sources. */
static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_sum2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* sum2 chip with .irq_ack for edge-triggered sources. */
static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_ack = octeon_irq_ciu_ack_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/* Spinlock-based chip for older CIUs without W1{S,C} registers. */
static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* As above, plus .irq_ack for edge-triggered sources. */
static struct irq_chip octeon_irq_chip_ciu_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

/* Mbox chip for older CIUs without W1{S,C} registers. */
static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,
	.irq_eoi = octeon_irq_ciu_enable_local,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
|
|
|
|
|
2012-07-05 16:12:37 +00:00
|
|
|
/* GPIO chip on the lockless (v2) CIU path. */
static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

/* GPIO chip on the spinlocked CIU path. */
static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/*
|
|
|
|
* Watchdog interrupts are special. They are associated with a single
|
|
|
|
* core, so we hardwire the affinity to that core.
|
|
|
|
*/
|
|
|
|
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
|
2009-01-09 00:46:40 +00:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
2011-03-25 19:38:51 +00:00
|
|
|
unsigned long *pen;
|
|
|
|
int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
|
|
|
|
int cpu = octeon_cpu_for_coreid(coreid);
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
|
2009-01-09 00:46:40 +00:00
|
|
|
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spin_lock_irqsave(lock, flags);
|
2011-03-25 19:38:51 +00:00
|
|
|
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
|
2012-04-05 17:24:25 +00:00
|
|
|
__set_bit(coreid, pen);
|
|
|
|
/*
|
|
|
|
* Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
|
|
|
|
* the irq.
|
|
|
|
*/
|
|
|
|
wmb();
|
2011-03-25 19:38:51 +00:00
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
|
2012-04-05 17:24:25 +00:00
|
|
|
raw_spin_unlock_irqrestore(lock, flags);
|
2009-01-09 00:46:40 +00:00
|
|
|
}
|
|
|
|
|
2010-07-23 17:43:46 +00:00
|
|
|
/*
|
|
|
|
* Watchdog interrupts are special. They are associated with a single
|
|
|
|
* core, so we hardwire the affinity to that core.
|
|
|
|
*/
|
2011-03-25 19:38:51 +00:00
|
|
|
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
|
2010-07-23 17:43:46 +00:00
|
|
|
{
|
2011-03-25 19:38:51 +00:00
|
|
|
int coreid = data->irq - OCTEON_IRQ_WDOG0;
|
|
|
|
int cpu = octeon_cpu_for_coreid(coreid);
|
2010-07-23 17:43:46 +00:00
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
|
|
|
|
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
|
2010-07-23 17:43:46 +00:00
|
|
|
}
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
|
|
|
|
static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
|
|
|
|
.name = "CIU-W",
|
|
|
|
.irq_enable = octeon_irq_ciu1_wd_enable_v2,
|
|
|
|
.irq_disable = octeon_irq_ciu_disable_all_v2,
|
|
|
|
.irq_mask = octeon_irq_ciu_disable_local_v2,
|
|
|
|
.irq_unmask = octeon_irq_ciu_enable_local_v2,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * irq_chip for the per-core watchdog interrupts on CIUs without the v2
 * helpers; enable goes through the spinlocked octeon_irq_ciu_wd_enable().
 */
static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,
};
|
|
|
|
|
2012-07-05 16:12:39 +00:00
|
|
|
/*
 * Report whether a CIU source is edge-triggered.
 *
 * On SUM line 0 the edge sources are GMX DRP (48-49), IPD_DRP (50),
 * the timers (52-55) and MPI (58).  On every other line only PTP
 * (bit 47) is edge-triggered; all remaining sources are level.
 */
static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	if (line == 0)
		return (bit >= 48 && bit <= 50) ||	/* GMX DRP, IPD_DRP */
		       (bit >= 52 && bit <= 55) ||	/* Timers */
		       bit == 58;			/* MPI */

	return bit == 47;				/* PTP */
}
|
|
|
|
|
|
|
|
/* Per-domain data for the GPIO irq domain. */
struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;	/* hwirq offset of GPIO pin 0 in the CIU space */
};
|
|
|
|
|
|
|
|
static int octeon_irq_gpio_xlat(struct irq_domain *d,
|
|
|
|
struct device_node *node,
|
|
|
|
const u32 *intspec,
|
|
|
|
unsigned int intsize,
|
|
|
|
unsigned long *out_hwirq,
|
|
|
|
unsigned int *out_type)
|
|
|
|
{
|
|
|
|
unsigned int type;
|
|
|
|
unsigned int pin;
|
|
|
|
unsigned int trigger;
|
|
|
|
|
2015-10-13 11:51:29 +00:00
|
|
|
if (irq_domain_get_of_node(d) != node)
|
2012-07-05 16:12:39 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (intsize < 2)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
pin = intspec[0];
|
|
|
|
if (pin >= 16)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
trigger = intspec[1];
|
|
|
|
|
|
|
|
switch (trigger) {
|
|
|
|
case 1:
|
|
|
|
type = IRQ_TYPE_EDGE_RISING;
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
type = IRQ_TYPE_EDGE_FALLING;
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
type = IRQ_TYPE_LEVEL_HIGH;
|
|
|
|
break;
|
|
|
|
case 8:
|
|
|
|
type = IRQ_TYPE_LEVEL_LOW;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
|
|
|
|
node->name,
|
|
|
|
trigger);
|
|
|
|
type = IRQ_TYPE_LEVEL_LOW;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
*out_type = type;
|
2012-08-10 23:00:31 +00:00
|
|
|
*out_hwirq = pin;
|
2012-07-05 16:12:39 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int octeon_irq_ciu_xlat(struct irq_domain *d,
|
|
|
|
struct device_node *node,
|
|
|
|
const u32 *intspec,
|
|
|
|
unsigned int intsize,
|
|
|
|
unsigned long *out_hwirq,
|
|
|
|
unsigned int *out_type)
|
|
|
|
{
|
|
|
|
unsigned int ciu, bit;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_irq_ciu_domain_data *dd = d->host_data;
|
2012-07-05 16:12:39 +00:00
|
|
|
|
|
|
|
ciu = intspec[0];
|
|
|
|
bit = intspec[1];
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
if (ciu >= dd->num_sum || bit > 63)
|
2012-07-05 16:12:39 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
*out_hwirq = (ciu << 6) | bit;
|
|
|
|
*out_type = 0;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * irq_chips selected at init time for the detected CIU variant;
 * octeon_irq_ciu_map()/octeon_irq_gpio_map() hand these to new mappings.
 */
static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_ciu_chip_edge;
static struct irq_chip *octeon_irq_gpio_chip;
|
|
|
|
|
|
|
|
/*
 * irq_domain .map callback for the CIU domain: bind virq to the CIU
 * source encoded in hw (line in the upper bits, bit in the low 6).
 * Line 2 (SUM2) has its own irq_chips; other lines use the chips
 * selected for the detected CIU variant.  Edge sources get
 * handle_edge_irq, level sources handle_level_irq.
 */
static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;
	struct irq_chip *chip;
	irq_flow_handler_t handler;
	bool edge;

	/* Reject out-of-range lines and sources that are already mapped. */
	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	edge = octeon_irq_ciu_is_edge(line, bit);
	handler = edge ? handle_edge_irq : handle_level_irq;

	if (line == 2)
		chip = edge ? &octeon_irq_chip_ciu_sum2_edge
			    : &octeon_irq_chip_ciu_sum2;
	else
		chip = edge ? octeon_irq_ciu_chip_edge : octeon_irq_ciu_chip;

	return octeon_irq_set_ciu_mapping(virq, line, bit, 0, chip, handler);
}
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
/*
 * irq_domain .map callback for the GPIO domain: bind virq to the CIU
 * line/bit that backs GPIO hwirq hw (offset by the domain's
 * base_hwirq).
 *
 * Fix: the bounds check used 'line > ARRAY_SIZE(...)', which allowed
 * line == ARRAY_SIZE(octeon_irq_ciu_to_irq) through and then indexed
 * one row past the end of the array.  Use '>=' so only valid rows are
 * accepted.
 */
static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;
	int r;

	line = (hw + gpiod->base_hwirq) >> 6;
	bit = (hw + gpiod->base_hwirq) & 63;
	if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
		octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	/*
	 * Default to handle_level_irq. If the DT contains a different
	 * trigger type, it will call the irq_set_type callback and
	 * the handler gets updated.
	 */
	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
				       octeon_irq_gpio_chip, handle_level_irq);
	return r;
}
|
|
|
|
|
2012-07-05 16:12:39 +00:00
|
|
|
static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
|
|
|
|
.map = octeon_irq_ciu_map,
|
2015-01-15 13:11:19 +00:00
|
|
|
.unmap = octeon_irq_free_cd,
|
2012-07-05 16:12:39 +00:00
|
|
|
.xlate = octeon_irq_ciu_xlat,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
|
|
|
|
.map = octeon_irq_gpio_map,
|
2015-01-15 13:11:19 +00:00
|
|
|
.unmap = octeon_irq_free_cd,
|
2012-07-05 16:12:39 +00:00
|
|
|
.xlate = octeon_irq_gpio_xlat,
|
|
|
|
};
|
|
|
|
|
2012-04-05 17:24:25 +00:00
|
|
|
/*
 * Dispatch a CIU interrupt taken on exception line IP2.
 *
 * Reads this core's SUM0 register and masks it with the per-cpu enable
 * mirror so only sources we enabled are considered, then forwards the
 * highest-numbered pending bit to do_IRQ().  A pending bit with no
 * registered mapping (or nothing pending at all) counts as spurious.
 */
static void octeon_irq_ip2_ciu(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
|
|
|
|
|
2012-04-05 17:24:25 +00:00
|
|
|
/*
 * Dispatch a CIU interrupt taken on exception line IP3.
 *
 * Same scheme as octeon_irq_ip2_ciu(), but for the SUM1 register and
 * the EN1 enable mirror; mapped irqs live in row 1 of
 * octeon_irq_ciu_to_irq.
 */
static void octeon_irq_ip3_ciu(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
/*
 * Dispatch a CIU interrupt taken on exception line IP4 (SUM2 sources).
 *
 * Unlike the IP2/IP3 paths there is no per-cpu enable mirror for SUM2;
 * the enable state is read back from the EN2 CSR each time.  Mapped
 * irqs live in row 2 of octeon_irq_ciu_to_irq.
 */
static void octeon_irq_ip4_ciu(void)
{
	int coreid = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
	u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));

	ciu_sum &= ciu_en;
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[2][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
|
|
|
|
|
2012-04-04 22:34:41 +00:00
|
|
|
/*
 * Set once an IP4 handler is installed (octeon_irq_set_ip4_handler());
 * secondary CPUs consult it to decide whether to unmask STATUSF_IP4.
 */
static bool octeon_irq_use_ip4;
|
|
|
|
|
MIPS: Delete __cpuinit/__CPUINIT usage from MIPS code
commit 3747069b25e419f6b51395f48127e9812abc3596 upstream.
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
the arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
related content into no-ops as early as possible, since that will get
rid of these warnings. In any case, they are temporary and harmless.
Here, we remove all the MIPS __cpuinit from C code and __CPUINIT
from asm files. MIPS is interesting in this respect, because there
are also uasm users hiding behind their own renamed versions of the
__cpuinit macros.
[1] https://lkml.org/lkml/2013/5/20/589
[ralf@linux-mips.org: Folded in Paul's followup fix.]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5494/
Patchwork: https://patchwork.linux-mips.org/patch/5495/
Patchwork: https://patchwork.linux-mips.org/patch/5509/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2013-06-18 13:38:59 +00:00
|
|
|
/* SMP cross-call helper: unmask exception line IP4 on the calling CPU. */
static void octeon_irq_local_enable_ip4(void *arg)
{
	set_c0_status(STATUSF_IP4);
}
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/*
 * Default IP4 action used when no handler is registered: mask the line
 * in the CP0 Status register and account the event as spurious.
 */
static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}
|
|
|
|
|
2011-03-25 19:38:51 +00:00
|
|
|
/*
 * Dispatch functions for the IP2/IP3/IP4 exception lines, selected at
 * init for the detected CIU variant.  NOTE(review): the call site is
 * outside this chunk -- presumably plat_irq_dispatch(); confirm.
 */
static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);
|
2009-01-09 00:46:40 +00:00
|
|
|
|
MIPS: Delete __cpuinit/__CPUINIT usage from MIPS code
commit 3747069b25e419f6b51395f48127e9812abc3596 upstream.
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
the arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
related content into no-ops as early as possible, since that will get
rid of these warnings. In any case, they are temporary and harmless.
Here, we remove all the MIPS __cpuinit from C code and __CPUINIT
from asm files. MIPS is interesting in this respect, because there
are also uasm users hiding behind their own renamed versions of the
__cpuinit macros.
[1] https://lkml.org/lkml/2013/5/20/589
[ralf@linux-mips.org: Folded in Paul's followup fix.]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5494/
Patchwork: https://patchwork.linux-mips.org/patch/5495/
Patchwork: https://patchwork.linux-mips.org/patch/5509/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2013-06-18 13:38:59 +00:00
|
|
|
/*
 * Hook run on each secondary CPU to initialize its local interrupt
 * state; pointed at one of the octeon_irq_setup_secondary_ciu*()
 * variants below (assignment happens outside this chunk).
 */
void (*octeon_irq_setup_secondary)(void);
|
2010-07-23 17:43:46 +00:00
|
|
|
|
MIPS: Delete __cpuinit/__CPUINIT usage from MIPS code
commit 3747069b25e419f6b51395f48127e9812abc3596 upstream.
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
the arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
related content into no-ops as early as possible, since that will get
rid of these warnings. In any case, they are temporary and harmless.
Here, we remove all the MIPS __cpuinit from C code and __CPUINIT
from asm files. MIPS is interesting in this respect, because there
are also uasm users hiding behind their own renamed versions of the
__cpuinit macros.
[1] https://lkml.org/lkml/2013/5/20/589
[ralf@linux-mips.org: Folded in Paul's followup fix.]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5494/
Patchwork: https://patchwork.linux-mips.org/patch/5495/
Patchwork: https://patchwork.linux-mips.org/patch/5509/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2013-06-18 13:38:59 +00:00
|
|
|
/*
 * octeon_irq_set_ip4_handler() - install the dispatch function for
 * exception line IP4.
 * @h: function to run when an IP4 interrupt is taken.
 *
 * Records the handler, marks IP4 as in use so CPUs brought up later
 * unmask it (see octeon_irq_setup_secondary_ciu*()), and unmasks IP4
 * on all currently-online CPUs via an SMP cross-call.
 */
void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
	octeon_irq_ip4 = h;
	octeon_irq_use_ip4 = true;
	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}
|
|
|
|
|
MIPS: Delete __cpuinit/__CPUINIT usage from MIPS code
commit 3747069b25e419f6b51395f48127e9812abc3596 upstream.
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
the arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
related content into no-ops as early as possible, since that will get
rid of these warnings. In any case, they are temporary and harmless.
Here, we remove all the MIPS __cpuinit from C code and __CPUINIT
from asm files. MIPS is interesting in this respect, because there
are also uasm users hiding behind their own renamed versions of the
__cpuinit macros.
[1] https://lkml.org/lkml/2013/5/20/589
[ralf@linux-mips.org: Folded in Paul's followup fix.]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5494/
Patchwork: https://patchwork.linux-mips.org/patch/5495/
Patchwork: https://patchwork.linux-mips.org/patch/5509/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2013-06-18 13:38:59 +00:00
|
|
|
/*
 * Notify the genirq core that this CPU is online so per-cpu irq_chips
 * get their irq_cpu_online callbacks invoked.
 */
static void octeon_irq_percpu_enable(void)
{
	irq_cpu_online();
}
|
|
|
|
|
MIPS: Delete __cpuinit/__CPUINIT usage from MIPS code
commit 3747069b25e419f6b51395f48127e9812abc3596 upstream.
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
the arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
related content into no-ops as early as possible, since that will get
rid of these warnings. In any case, they are temporary and harmless.
Here, we remove all the MIPS __cpuinit from C code and __CPUINIT
from asm files. MIPS is interesting in this respect, because there
are also uasm users hiding behind their own renamed versions of the
__cpuinit macros.
[1] https://lkml.org/lkml/2013/5/20/589
[ralf@linux-mips.org: Folded in Paul's followup fix.]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5494/
Patchwork: https://patchwork.linux-mips.org/patch/5495/
Patchwork: https://patchwork.linux-mips.org/patch/5509/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2013-06-18 13:38:59 +00:00
|
|
|
/*
 * Per-CPU init for the v1 CIU: zero the software enable mirrors (made
 * visible with wmb() before any CSR write), initialize this cpu's CIU
 * spinlock, and clear all hardware enable registers for this core.
 */
static void octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();


	__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
	__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
	wmb();
	raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}
|
2009-10-13 18:26:03 +00:00
|
|
|
|
2012-04-04 22:34:41 +00:00
|
|
|
/*
 * Per-CPU init for the CIU2: clear every enable register for this core
 * by walking the register/IPx strides, then read SUM back to make sure
 * the writes have completed.
 */
static void octeon_irq_init_ciu2_percpu(void)
{
	u64 regx, ipx;
	int coreid = cvmx_get_core_num();
	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);

	/*
	 * Disable All CIU2 Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 *
	 * There are 9 registers and 3 IPX levels with strides 0x1000
	 * and 0x200 respectivly. Use loops to clear them.
	 */
	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
			cvmx_write_csr(base + regx + ipx, 0);
	}

	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}
|
|
|
|
|
MIPS: Delete __cpuinit/__CPUINIT usage from MIPS code
commit 3747069b25e419f6b51395f48127e9812abc3596 upstream.
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
the arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
related content into no-ops as early as possible, since that will get
rid of these warnings. In any case, they are temporary and harmless.
Here, we remove all the MIPS __cpuinit from C code and __CPUINIT
from asm files. MIPS is interesting in this respect, because there
are also uasm users hiding behind their own renamed versions of the
__cpuinit macros.
[1] https://lkml.org/lkml/2013/5/20/589
[ralf@linux-mips.org: Folded in Paul's followup fix.]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5494/
Patchwork: https://patchwork.linux-mips.org/patch/5495/
Patchwork: https://patchwork.linux-mips.org/patch/5509/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2013-06-18 13:38:59 +00:00
|
|
|
/*
 * Bring up CIU (v1) interrupt handling on a secondary core: first reset
 * the per-CPU CIU enable state, then enable the per-CPU interrupt
 * sources, and finally unmask the CP0 status lines the CIU feeds.
 */
static void octeon_irq_setup_secondary_ciu(void)
{
	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	/* IP4 is only used on models where octeon_irq_use_ip4 was set. */
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}
|
2010-07-23 17:43:46 +00:00
|
|
|
|
2012-04-04 22:34:41 +00:00
|
|
|
/*
 * Bring up CIU2 interrupt handling on a secondary core.  Mirrors
 * octeon_irq_setup_secondary_ciu() but initializes the CIU2 registers.
 */
static void octeon_irq_setup_secondary_ciu2(void)
{
	octeon_irq_init_ciu2_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	/* IP4 is only used on models where octeon_irq_use_ip4 was set. */
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
/*
 * Probe/init for the CIU (v1) interrupt-controller devicetree node.
 *
 * Selects the per-model irq_chip variants, installs the IP2/IP3/IP4
 * dispatch handlers, creates the CIU irq_domain and establishes the
 * fixed hwirq mappings for the on-chip peripherals.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init octeon_irq_init_ciu(
	struct device_node *ciu_node, struct device_node *parent)
{
	unsigned int i, r;
	struct irq_chip *chip;
	struct irq_chip *chip_edge;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct irq_domain *ciu_domain = NULL;
	struct octeon_irq_ciu_domain_data *dd;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	/*
	 * OCTEON II/III (except CN63XX) deliver a third summary vector on
	 * IP4; older models only use IP2 and IP3.
	 */
	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
		&& !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		octeon_irq_ip4 = octeon_irq_ip4_ciu;
		dd->num_sum = 3;
		octeon_irq_use_ip4 = true;
	} else {
		octeon_irq_ip4 = octeon_irq_ip4_mask;
		dd->num_sum = 2;
		octeon_irq_use_ip4 = false;
	}
	/* Newer parts get the "v2" chip variants. */
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_edge = &octeon_irq_chip_ciu_v2_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		chip = &octeon_irq_chip_ciu;
		chip_edge = &octeon_irq_chip_ciu_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	octeon_irq_ciu_chip_edge = chip_edge;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu_ops, dd);
	irq_set_default_host(ciu_domain);

	/* CIU_0 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
		if (r)
			goto err;
	}

	/* Mailbox irqs use their own per-cpu chip and handler. */
	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;
	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
		if (r)
			goto err;
	}
	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
	if (r)
		goto err;

	/* CIU_1 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_set_ciu_mapping(
			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
			handle_level_irq);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
	if (r)
		goto err;

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);

	return 0;
err:
	/*
	 * NOTE(review): dd is not freed here; once handed to
	 * irq_domain_add_tree() the domain owns it, so an unconditional
	 * kfree would be unsafe -- confirm the early-failure leak is
	 * acceptable for this __init path.
	 */
	return r;
}
|
2010-07-23 17:43:46 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
/*
 * Probe/init for the GPIO interrupt-controller devicetree node.
 *
 * Reads the parent's "#interrupt-cells" to decide how to decode this
 * node's "interrupts" property into the base hwirq of the 16 GPIO
 * lines, then registers a linear irq_domain for them.
 *
 * Fixes over the previous version: the result of
 * irq_domain_add_linear() is now checked, and gpiod is freed when the
 * domain cannot be created (previously both the failure and the memory
 * were silently leaked).
 *
 * Returns 0 on success or a negative errno.
 */
static int __init octeon_irq_init_gpio(
	struct device_node *gpio_node, struct device_node *parent)
{
	struct octeon_irq_gpio_domain_data *gpiod;
	u32 interrupt_cells;
	unsigned int base_hwirq;
	int r;

	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
	if (r)
		return r;

	if (interrupt_cells == 1) {
		/* One cell: the hwirq number directly. */
		u32 v;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = v;
	} else if (interrupt_cells == 2) {
		/* Two cells: (line, bit) encoded as (v0 << 6) | v1. */
		u32 v0, v1;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = (v0 << 6) | v1;
	} else {
		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
			interrupt_cells);
		return -EINVAL;
	}

	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
	if (!gpiod) {
		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
		return -ENOMEM;
	}

	/* gpio domain host_data is the base hwirq number. */
	gpiod->base_hwirq = base_hwirq;
	if (!irq_domain_add_linear(gpio_node, 16,
				   &octeon_irq_domain_gpio_ops, gpiod)) {
		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
		kfree(gpiod);
		return -ENOMEM;
	}

	return 0;
}
|
2012-04-04 22:34:41 +00:00
|
|
|
/*
|
|
|
|
* Watchdog interrupts are special. They are associated with a single
|
|
|
|
* core, so we hardwire the affinity to that core.
|
|
|
|
*/
|
|
|
|
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
|
|
|
|
{
|
|
|
|
u64 mask;
|
|
|
|
u64 en_addr;
|
|
|
|
int coreid = data->irq - OCTEON_IRQ_WDOG0;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-04-04 22:34:41 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
2012-04-04 22:34:41 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
|
|
|
|
(0x1000ull * cd->line);
|
2012-04-04 22:34:41 +00:00
|
|
|
cvmx_write_csr(en_addr, mask);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu2_enable(struct irq_data *data)
|
|
|
|
{
|
|
|
|
u64 mask;
|
|
|
|
u64 en_addr;
|
|
|
|
int cpu = next_cpu_for_irq(data);
|
|
|
|
int coreid = octeon_coreid_for_cpu(cpu);
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-04-04 22:34:41 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
2012-04-04 22:34:41 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
|
|
|
|
(0x1000ull * cd->line);
|
2012-04-04 22:34:41 +00:00
|
|
|
cvmx_write_csr(en_addr, mask);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu2_enable_local(struct irq_data *data)
|
|
|
|
{
|
|
|
|
u64 mask;
|
|
|
|
u64 en_addr;
|
|
|
|
int coreid = cvmx_get_core_num();
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-04-04 22:34:41 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
2012-04-04 22:34:41 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
|
|
|
|
(0x1000ull * cd->line);
|
2012-04-04 22:34:41 +00:00
|
|
|
cvmx_write_csr(en_addr, mask);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu2_disable_local(struct irq_data *data)
|
|
|
|
{
|
|
|
|
u64 mask;
|
|
|
|
u64 en_addr;
|
|
|
|
int coreid = cvmx_get_core_num();
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-04-04 22:34:41 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
2012-04-04 22:34:41 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
|
|
|
|
(0x1000ull * cd->line);
|
2012-04-04 22:34:41 +00:00
|
|
|
cvmx_write_csr(en_addr, mask);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu2_ack(struct irq_data *data)
|
|
|
|
{
|
|
|
|
u64 mask;
|
|
|
|
u64 en_addr;
|
|
|
|
int coreid = cvmx_get_core_num();
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-04-04 22:34:41 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
2012-04-04 22:34:41 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
|
2012-04-04 22:34:41 +00:00
|
|
|
cvmx_write_csr(en_addr, mask);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu2_disable_all(struct irq_data *data)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
u64 mask;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-04-04 22:34:41 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << (cd->bit);
|
2012-04-04 22:34:41 +00:00
|
|
|
|
|
|
|
for_each_online_cpu(cpu) {
|
2015-01-15 13:11:19 +00:00
|
|
|
u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
|
|
|
|
octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
|
2012-04-04 22:34:41 +00:00
|
|
|
cvmx_write_csr(en_addr, mask);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
u64 mask;
|
|
|
|
|
|
|
|
mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
|
|
|
|
|
|
|
|
for_each_online_cpu(cpu) {
|
2015-01-15 13:11:19 +00:00
|
|
|
u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
|
|
|
|
octeon_coreid_for_cpu(cpu));
|
2012-04-04 22:34:41 +00:00
|
|
|
cvmx_write_csr(en_addr, mask);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
u64 mask;
|
|
|
|
|
|
|
|
mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
|
|
|
|
|
|
|
|
for_each_online_cpu(cpu) {
|
2015-01-15 13:11:19 +00:00
|
|
|
u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
|
|
|
|
octeon_coreid_for_cpu(cpu));
|
2012-04-04 22:34:41 +00:00
|
|
|
cvmx_write_csr(en_addr, mask);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
|
|
|
|
{
|
|
|
|
u64 mask;
|
|
|
|
u64 en_addr;
|
|
|
|
int coreid = cvmx_get_core_num();
|
|
|
|
|
|
|
|
mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
|
|
|
|
en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
|
|
|
|
cvmx_write_csr(en_addr, mask);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
|
|
|
|
{
|
|
|
|
u64 mask;
|
|
|
|
u64 en_addr;
|
|
|
|
int coreid = cvmx_get_core_num();
|
|
|
|
|
|
|
|
mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
|
|
|
|
en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
|
|
|
|
cvmx_write_csr(en_addr, mask);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
|
|
|
|
const struct cpumask *dest, bool force)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
|
|
|
|
u64 mask;
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-04-04 22:34:41 +00:00
|
|
|
|
|
|
|
if (!enable_one)
|
|
|
|
return 0;
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
mask = 1ull << cd->bit;
|
2012-04-04 22:34:41 +00:00
|
|
|
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
u64 en_addr;
|
|
|
|
if (cpumask_test_cpu(cpu, dest) && enable_one) {
|
|
|
|
enable_one = false;
|
2015-01-15 13:11:19 +00:00
|
|
|
en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
|
|
|
|
octeon_coreid_for_cpu(cpu)) +
|
|
|
|
(0x1000ull * cd->line);
|
2012-04-04 22:34:41 +00:00
|
|
|
} else {
|
2015-01-15 13:11:19 +00:00
|
|
|
en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
|
|
|
|
octeon_coreid_for_cpu(cpu)) +
|
|
|
|
(0x1000ull * cd->line);
|
2012-04-04 22:34:41 +00:00
|
|
|
}
|
|
|
|
cvmx_write_csr(en_addr, mask);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
 * Enable a GPIO interrupt: configure the GPIO pin's interrupt
 * generation first, then enable the corresponding CIU2 source.
 */
static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
|
|
|
|
{
|
2015-01-15 13:11:19 +00:00
|
|
|
struct octeon_ciu_chip_data *cd;
|
2012-04-04 22:34:41 +00:00
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
cd = irq_data_get_irq_chip_data(data);
|
|
|
|
|
|
|
|
cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
|
2012-04-04 22:34:41 +00:00
|
|
|
|
|
|
|
octeon_irq_ciu2_disable_all(data);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * irq_chip for level-triggered CIU2 sources (no .irq_ack).
 * NOTE(review): .name is identical to octeon_irq_chip_ciu2_edge's
 * ("CIU2-E"), making the two chips indistinguishable in diagnostics --
 * looks like a copy/paste leftover; confirm whether "CIU2" was meant.
 */
static struct irq_chip octeon_irq_chip_ciu2 = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
|
|
|
|
|
|
|
|
/*
 * irq_chip for edge-triggered CIU2 sources; differs from
 * octeon_irq_chip_ciu2 by the .irq_ack callback that clears the
 * latched RAW bit.
 */
static struct irq_chip octeon_irq_chip_ciu2_edge = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
|
|
|
|
|
|
|
|
/*
 * irq_chip for the per-cpu CIU2 mailbox interrupts.  ack/eoi toggle the
 * local enable bit; enable/disable act on all online cores, and the
 * online/offline hooks keep hotplugged cores consistent.
 */
static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.name = "CIU2-M",
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,

	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
|
|
|
|
|
|
|
|
/*
 * irq_chip for the per-core CIU2 watchdog interrupts; enable hardwires
 * the affinity to the core matching the irq number.
 */
static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.name = "CIU2-W",
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,
};
|
|
|
|
|
|
|
|
/*
 * irq_chip for GPIO pins routed through the CIU2; enable/disable also
 * program the GPIO pin configuration registers.
 */
static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
|
|
|
|
|
|
|
|
static int octeon_irq_ciu2_xlat(struct irq_domain *d,
|
|
|
|
struct device_node *node,
|
|
|
|
const u32 *intspec,
|
|
|
|
unsigned int intsize,
|
|
|
|
unsigned long *out_hwirq,
|
|
|
|
unsigned int *out_type)
|
|
|
|
{
|
|
|
|
unsigned int ciu, bit;
|
|
|
|
|
|
|
|
ciu = intspec[0];
|
|
|
|
bit = intspec[1];
|
|
|
|
|
|
|
|
*out_hwirq = (ciu << 6) | bit;
|
|
|
|
*out_type = 0;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Return true when the CIU2 source at (line, bit) is edge-triggered.
 * Only a handful of MIO (line 3) and PKT (line 6) sources are edges;
 * everything else is level-triggered.
 */
static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
	if (line == 3)	/* MIO */
		return bit == 2 ||			/* IPD_DRP */
		       (bit >= 8 && bit <= 11) ||	/* Timers */
		       bit == 48;			/* PTP */

	if (line == 6)	/* PKT */
		return (bit >= 52 && bit <= 53) ||	/* ILK_DRP */
		       (bit >= 8 && bit <= 12);		/* GMX_DRP */

	return false;
}
|
|
|
|
|
|
|
|
/*
 * irq_domain .map callback for the CIU2 domain: bind @virq to the
 * hardware source encoded in @hw ((line << 6) | bit), choosing the
 * edge or level chip as appropriate.
 *
 * Fix: the return value of octeon_irq_set_ciu_mapping() was previously
 * discarded, so a failed mapping was reported as success; it is now
 * propagated to the caller.
 *
 * Returns 0 on success or a negative errno.
 */
static int octeon_irq_ciu2_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	/*
	 * Don't map irq if it is reserved for GPIO.
	 * (Line 7 are the GPIO lines.)
	 */
	if (line == 7)
		return 0;

	/* Reject out-of-range lines and already-mapped sources. */
	if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu2_is_edge(line, bit))
		return octeon_irq_set_ciu_mapping(virq, line, bit, 0,
						  &octeon_irq_chip_ciu2_edge,
						  handle_edge_irq);

	return octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					  &octeon_irq_chip_ciu2,
					  handle_level_irq);
}
|
|
|
|
|
|
|
|
static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
|
|
|
|
.map = octeon_irq_ciu2_map,
|
2015-01-15 13:11:19 +00:00
|
|
|
.unmap = octeon_irq_free_cd,
|
2012-04-04 22:34:41 +00:00
|
|
|
.xlate = octeon_irq_ciu2_xlat,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * IP2 dispatch handler for CIU2: find the highest pending (line, bit),
 * look up the mapped Linux irq and dispatch it.  Any miss along the
 * way is counted as a spurious interrupt.
 */
static void octeon_irq_ciu2(void)
{
	int line;
	int bit;
	int irq;
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	/* Low 8 bits of the SUM register: one bit per pending line. */
	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;
	/* Per-line SRC registers are spaced 0x1000 apart. */
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	if (unlikely(!src))
		goto spurious;

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];
	if (unlikely(!irq))
		goto spurious;

	do_IRQ(irq);
	goto out;

spurious:
	spurious_interrupt();
out:
	/* CN68XX pass 1.x has an errata that accessing the ACK registers
	   can stop interrupts from propagating */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
	return;
}
|
|
|
|
|
|
|
|
/*
 * IP3 dispatch handler for CIU2 mailbox interrupts: the top 4 bits of
 * the IP3 SUM register select which of the MBOX0-3 irqs is pending.
 */
static void octeon_irq_ciu2_mbox(void)
{
	int line;

	const unsigned long core_id = cvmx_get_core_num();
	/* Mailbox bits live in the top nibble of the SUM register. */
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/* CN68XX pass 1.x has an errata that accessing the ACK registers
	   can stop interrupts from propagating */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
/*
 * Probe/init for the CIU2 interrupt-controller devicetree node:
 * installs the CIU2 dispatch handlers, creates the irq_domain and
 * establishes the fixed hwirq mappings.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init octeon_irq_init_ciu2(
	struct device_node *ciu_node, struct device_node *parent)
{
	unsigned int i, r;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	/* CIU2 does not use IP4. */
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
	irq_set_default_host(ciu_domain);

	/* CIU2 */
	for (i = 0; i < 64; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
		if (r)
			goto err;
	}

	/* One watchdog irq per core, bound via the dedicated wd chip. */
	for (i = 0; i < 32; i++) {
		r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
			&octeon_irq_chip_ciu2_wd, handle_level_irq);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
		if (r)
			goto err;
	}

	/* Mailbox irqs bypass the domain; set their chip directly. */
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
	return 0;
err:
	return r;
}
|
|
|
|
|
|
|
|
/* Per-controller state for one CIB interrupt block. */
struct octeon_irq_cib_host_data {
	raw_spinlock_t lock;	/* serializes read-modify-write of en_reg */
	u64 raw_reg;		/* virtual address of the RAW (status/ack) CSR */
	u64 en_reg;		/* virtual address of the ENABLE CSR */
	int max_bits;		/* number of valid interrupt bits (from DT) */
};
|
|
|
|
|
|
|
|
/* Per-interrupt data: one bit within a CIB block. */
struct octeon_irq_cib_chip_data {
	struct octeon_irq_cib_host_data *host_data;	/* owning controller */
	int bit;					/* bit index in raw/en registers */
};
|
|
|
|
|
|
|
|
static void octeon_irq_cib_enable(struct irq_data *data)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
u64 en;
|
|
|
|
struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
|
|
|
|
struct octeon_irq_cib_host_data *host_data = cd->host_data;
|
|
|
|
|
|
|
|
raw_spin_lock_irqsave(&host_data->lock, flags);
|
|
|
|
en = cvmx_read_csr(host_data->en_reg);
|
|
|
|
en |= 1ull << cd->bit;
|
|
|
|
cvmx_write_csr(host_data->en_reg, en);
|
|
|
|
raw_spin_unlock_irqrestore(&host_data->lock, flags);
|
2012-04-04 22:34:41 +00:00
|
|
|
}
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
static void octeon_irq_cib_disable(struct irq_data *data)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
u64 en;
|
|
|
|
struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
|
|
|
|
struct octeon_irq_cib_host_data *host_data = cd->host_data;
|
|
|
|
|
|
|
|
raw_spin_lock_irqsave(&host_data->lock, flags);
|
|
|
|
en = cvmx_read_csr(host_data->en_reg);
|
|
|
|
en &= ~(1ull << cd->bit);
|
|
|
|
cvmx_write_csr(host_data->en_reg, en);
|
|
|
|
raw_spin_unlock_irqrestore(&host_data->lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Record the requested trigger type.  No hardware programming is
 * needed here; the chained handler reads the recorded type back to
 * decide whether to ack edge interrupts before dispatching them.
 */
static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	return IRQ_SET_MASK_OK;
}
|
|
|
|
|
|
|
|
/*
 * irq_chip for CIB sources.  mask/unmask alias disable/enable because
 * the block only has a single enable register.
 */
static struct irq_chip octeon_irq_chip_cib = {
	.name = "CIB",
	.irq_enable = octeon_irq_cib_enable,
	.irq_disable = octeon_irq_cib_disable,
	.irq_mask = octeon_irq_cib_disable,
	.irq_unmask = octeon_irq_cib_enable,
	.irq_set_type = octeon_irq_cib_set_type,
};
|
|
|
|
|
|
|
|
static int octeon_irq_cib_xlat(struct irq_domain *d,
|
|
|
|
struct device_node *node,
|
|
|
|
const u32 *intspec,
|
|
|
|
unsigned int intsize,
|
|
|
|
unsigned long *out_hwirq,
|
|
|
|
unsigned int *out_type)
|
|
|
|
{
|
|
|
|
unsigned int type = 0;
|
|
|
|
|
|
|
|
if (intsize == 2)
|
|
|
|
type = intspec[1];
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case 0: /* unofficial value, but we might as well let it work. */
|
|
|
|
case 4: /* official value for level triggering. */
|
|
|
|
*out_type = IRQ_TYPE_LEVEL_HIGH;
|
|
|
|
break;
|
|
|
|
case 1: /* official value for edge triggering. */
|
|
|
|
*out_type = IRQ_TYPE_EDGE_RISING;
|
|
|
|
break;
|
|
|
|
default: /* Nothing else is acceptable. */
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
*out_hwirq = intspec[0];
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * irq_domain .map callback for a CIB block: allocate per-irq chip data
 * for bit @hw and attach the CIB chip to @virq.
 *
 * Fix: the kzalloc() result was previously used unchecked, so an
 * allocation failure dereferenced NULL; it now returns -ENOMEM.
 *
 * Returns 0 on success or a negative errno.
 */
static int octeon_irq_cib_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_cib_host_data *host_data = d->host_data;
	struct octeon_irq_cib_chip_data *cd;

	if (hw >= host_data->max_bits) {
		pr_err("ERROR: %s mapping %u is to big!\n",
		       irq_domain_get_of_node(d)->name, (unsigned)hw);
		return -EINVAL;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	cd->host_data = host_data;
	cd->bit = hw;

	irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
				 handle_simple_irq);
	irq_set_chip_data(virq, cd);
	return 0;
}
|
|
|
|
|
|
|
|
static struct irq_domain_ops octeon_irq_domain_cib_ops = {
|
|
|
|
.map = octeon_irq_cib_map,
|
|
|
|
.unmap = octeon_irq_free_cd,
|
|
|
|
.xlate = octeon_irq_cib_xlat,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Chain to real handler.
 *
 * Chained parent-irq handler for a CIB block: scan the enabled+raw
 * pending bits, ack edges, and dispatch each mapped child irq.  An
 * unmapped pending bit is disabled and acked so it cannot storm.
 */
static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
{
	u64 en;
	u64 raw;
	u64 bits;
	int i;
	int irq;
	struct irq_domain *cib_domain = data;
	struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;

	en = cvmx_read_csr(host_data->en_reg);
	raw = cvmx_read_csr(host_data->raw_reg);

	/* Only bits that are both enabled and pending are serviced. */
	bits = en & raw;

	for (i = 0; i < host_data->max_bits; i++) {
		if ((bits & 1ull << i) == 0)
			continue;
		irq = irq_find_mapping(cib_domain, i);
		if (!irq) {
			unsigned long flags;

			/* No mapping: disable and ack the bit so it
			 * can't keep re-raising the parent irq. */
			pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
				i, host_data->raw_reg);
			raw_spin_lock_irqsave(&host_data->lock, flags);
			en = cvmx_read_csr(host_data->en_reg);
			en &= ~(1ull << i);
			cvmx_write_csr(host_data->en_reg, en);
			cvmx_write_csr(host_data->raw_reg, 1ull << i);
			raw_spin_unlock_irqrestore(&host_data->lock, flags);
		} else {
			struct irq_desc *desc = irq_to_desc(irq);
			struct irq_data *irq_data = irq_desc_get_irq_data(desc);
			/* If edge, acknowledge the bit we will be sending. */
			if (irqd_get_trigger_type(irq_data) &
				IRQ_TYPE_EDGE_BOTH)
				cvmx_write_csr(host_data->raw_reg, 1ull << i);
			generic_handle_irq_desc(desc);
		}
	}

	return IRQ_HANDLED;
}
|
|
|
|
|
|
|
|
/*
 * Probe/init for a CIB interrupt-controller devicetree node: map the
 * RAW and ENABLE registers from reg(0)/reg(1), create a linear
 * irq_domain sized by "cavium,max-bits", quiesce the block and chain
 * the parent irq to octeon_irq_cib_handler().
 *
 * Fixes over the previous version: the kzalloc() result is checked,
 * and host_data is freed on every error path taken before the
 * irq_domain takes ownership of it (previously it leaked).
 *
 * Returns 0 on success or a negative errno.
 */
static int __init octeon_irq_init_cib(struct device_node *ciu_node,
				      struct device_node *parent)
{
	const __be32 *addr;
	u32 val;
	struct octeon_irq_cib_host_data *host_data;
	int parent_irq;
	int r;
	struct irq_domain *cib_domain;

	parent_irq = irq_of_parse_and_map(ciu_node, 0);
	if (!parent_irq) {
		pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
			ciu_node->name);
		return -EINVAL;
	}

	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return -ENOMEM;
	raw_spin_lock_init(&host_data->lock);

	addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
		r = -EINVAL;
		goto free_host_data;
	}
	host_data->raw_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	addr = of_get_address(ciu_node, 1, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
		r = -EINVAL;
		goto free_host_data;
	}
	host_data->en_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
	if (r) {
		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
			ciu_node->name);
		goto free_host_data;
	}
	host_data->max_bits = val;

	cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
					   &octeon_irq_domain_cib_ops,
					   host_data);
	if (!cib_domain) {
		pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
		r = -ENOMEM;
		goto free_host_data;
	}

	/* The domain now owns host_data; do not free it past this point. */
	cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
	cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */

	r = request_irq(parent_irq, octeon_irq_cib_handler,
			IRQF_NO_THREAD, "cib", cib_domain);
	if (r) {
		pr_err("request_irq cib failed %d\n", r);
		return r;
	}
	pr_info("CIB interrupt controller probed: %llx %d\n",
		host_data->raw_reg, host_data->max_bits);
	return 0;

free_host_data:
	kfree(host_data);
	return r;
}
|
|
|
|
|
2016-02-09 19:00:11 +00:00
|
|
|
int octeon_irq_ciu3_xlat(struct irq_domain *d,
|
|
|
|
struct device_node *node,
|
|
|
|
const u32 *intspec,
|
|
|
|
unsigned int intsize,
|
|
|
|
unsigned long *out_hwirq,
|
|
|
|
unsigned int *out_type)
|
|
|
|
{
|
|
|
|
struct octeon_ciu3_info *ciu3_info = d->host_data;
|
|
|
|
unsigned int hwirq, type, intsn_major;
|
|
|
|
union cvmx_ciu3_iscx_ctl isc;
|
|
|
|
|
|
|
|
if (intsize < 2)
|
|
|
|
return -EINVAL;
|
|
|
|
hwirq = intspec[0];
|
|
|
|
type = intspec[1];
|
|
|
|
|
|
|
|
if (hwirq >= (1 << 20))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
intsn_major = hwirq >> 12;
|
|
|
|
switch (intsn_major) {
|
|
|
|
case 0x04: /* Software handled separately. */
|
|
|
|
return -EINVAL;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
isc.u64 = cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq));
|
|
|
|
if (!isc.s.imp)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case 4: /* official value for level triggering. */
|
|
|
|
*out_type = IRQ_TYPE_LEVEL_HIGH;
|
|
|
|
break;
|
|
|
|
case 0: /* unofficial value, but we might as well let it work. */
|
|
|
|
case 1: /* official value for edge triggering. */
|
|
|
|
*out_type = IRQ_TYPE_EDGE_RISING;
|
|
|
|
break;
|
|
|
|
default: /* Nothing else is acceptable. */
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
*out_hwirq = hwirq;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Enable (unmask) a CIU3 interrupt source and route it to the CPU
 * chosen by next_cpu_for_irq(), via that CPU's IP2 interrupt delivery
 * table (IDT).
 */
void octeon_irq_ciu3_enable(struct irq_data *data)
{
	int cpu;
	union cvmx_ciu3_iscx_ctl isc_ctl;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_ctl_addr;

	struct octeon_ciu_chip_data *cd;

	cpu = next_cpu_for_irq(data);

	cd = irq_data_get_irq_chip_data(data);

	/*
	 * Clear EN via the W1C register first, before rewriting ISC_CTL
	 * below -- presumably so the IDT change is never observed with
	 * the source still enabled.
	 */
	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;
	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);

	/* Program the destination IDT and set the enable bit. */
	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
	isc_ctl.u64 = 0;
	isc_ctl.s.en = 1;
	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
	/* Read back so the write is posted -- pattern used throughout this driver. */
	cvmx_read_csr(isc_ctl_addr);
}
|
|
|
|
|
|
|
|
/*
 * Fully disable a CIU3 interrupt source: clear its enable bit via W1C,
 * then zero the whole ISC_CTL (removing the IDT routing as well).
 */
void octeon_irq_ciu3_disable(struct irq_data *data)
{
	u64 isc_ctl_addr;
	union cvmx_ciu3_iscx_w1c isc_w1c;

	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;

	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
	cvmx_write_csr(isc_ctl_addr, 0);
	/* Read back so the writes are posted -- pattern used throughout this driver. */
	cvmx_read_csr(isc_ctl_addr);
}
|
|
|
|
|
|
|
|
/*
 * Acknowledge an edge-triggered CIU3 interrupt by clearing its RAW bit.
 * Level interrupts need no ack (and get none).
 */
void octeon_irq_ciu3_ack(struct irq_data *data)
{
	u64 isc_w1c_addr;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	struct octeon_ciu_chip_data *cd;
	u32 trigger_type = irqd_get_trigger_type(data);

	/*
	 * We use a single irq_chip, so we have to do nothing to ack a
	 * level interrupt.
	 */
	if (!(trigger_type & IRQ_TYPE_EDGE_BOTH))
		return;

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.raw = 1;

	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	/* Read back so the write is posted -- pattern used throughout this driver. */
	cvmx_read_csr(isc_w1c_addr);
}
|
|
|
|
|
|
|
|
/* Mask a CIU3 interrupt source by clearing its enable bit via W1C. */
void octeon_irq_ciu3_mask(struct irq_data *data)
{
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_w1c_addr;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;

	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	/* Read back so the write is posted -- pattern used throughout this driver. */
	cvmx_read_csr(isc_w1c_addr);
}
|
|
|
|
|
|
|
|
/*
 * Combined mask+ack: always clear the enable bit; additionally clear
 * the RAW (pending) bit for edge-triggered sources.
 */
void octeon_irq_ciu3_mask_ack(struct irq_data *data)
{
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_w1c_addr;
	struct octeon_ciu_chip_data *cd;
	u32 trigger_type = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;

	/*
	 * We use a single irq_chip, so only ack an edge (!level)
	 * interrupt.
	 */
	if (trigger_type & IRQ_TYPE_EDGE_BOTH)
		isc_w1c.s.raw = 1;

	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	/* Read back so the write is posted -- pattern used throughout this driver. */
	cvmx_read_csr(isc_w1c_addr);
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_SMP
/*
 * Retarget a CIU3 interrupt at the first CPU of @dest by reprogramming
 * its ISC to point at that CPU's IP2 interrupt delivery table.
 *
 * The destination must lie within the source's CIU node.  If the
 * interrupt is currently disabled or masked, only bookkeeping is
 * required and the hardware is left untouched.
 *
 * Fix vs. original: removed a redundant second call to
 * irq_data_get_irq_chip_data() -- cd is already initialized at its
 * declaration and is not modified in between.
 */
int octeon_irq_ciu3_set_affinity(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	union cvmx_ciu3_iscx_ctl isc_ctl;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_ctl_addr;
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
		return -EINVAL;

	if (!enable_one)
		return IRQ_SET_MASK_OK;

	cpu = cpumask_first(dest);
	if (cpu >= nr_cpu_ids)
		cpu = smp_processor_id();
	cd->current_cpu = cpu;

	/* Disable the source before changing its routing. */
	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;
	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);

	/* Re-enable, routed to the new CPU's IP2 IDT. */
	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
	isc_ctl.u64 = 0;
	isc_ctl.s.en = 1;
	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
	/* Read back so the write is posted -- pattern used throughout this driver. */
	cvmx_read_csr(isc_ctl_addr);

	return IRQ_SET_MASK_OK;
}
#endif
|
|
|
|
|
|
|
|
/* irq_chip used for ordinary (non-mailbox) CIU3 interrupt sources. */
static struct irq_chip octeon_irq_chip_ciu3 = {
	.name = "CIU3",
	.irq_startup = edge_startup,
	.irq_enable = octeon_irq_ciu3_enable,
	.irq_disable = octeon_irq_ciu3_disable,
	.irq_ack = octeon_irq_ciu3_ack,
	.irq_mask = octeon_irq_ciu3_mask,
	.irq_mask_ack = octeon_irq_ciu3_mask_ack,
	.irq_unmask = octeon_irq_ciu3_enable,
	.irq_set_type = octeon_irq_ciu_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu3_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
|
|
|
|
|
|
|
|
int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
|
|
|
|
irq_hw_number_t hw, struct irq_chip *chip)
|
|
|
|
{
|
|
|
|
struct octeon_ciu3_info *ciu3_info = d->host_data;
|
|
|
|
struct octeon_ciu_chip_data *cd = kzalloc_node(sizeof(*cd), GFP_KERNEL,
|
|
|
|
ciu3_info->node);
|
|
|
|
if (!cd)
|
|
|
|
return -ENOMEM;
|
|
|
|
cd->intsn = hw;
|
|
|
|
cd->current_cpu = -1;
|
|
|
|
cd->ciu3_addr = ciu3_info->ciu3_addr;
|
|
|
|
cd->ciu_node = ciu3_info->node;
|
|
|
|
irq_set_chip_and_handler(virq, chip, handle_edge_irq);
|
|
|
|
irq_set_chip_data(virq, cd);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Default domain .map: map with the common CIU3 irq_chip. */
static int octeon_irq_ciu3_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	return octeon_irq_ciu3_mapx(d, virq, hw, &octeon_irq_chip_ciu3);
}
|
|
|
|
|
|
|
|
static struct irq_domain_ops octeon_dflt_domain_ciu3_ops = {
|
|
|
|
.map = octeon_irq_ciu3_map,
|
|
|
|
.unmap = octeon_irq_free_cd,
|
|
|
|
.xlate = octeon_irq_ciu3_xlat,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * IP2 (device interrupt) demultiplexer for CIU3: read this core's
 * DEST_PP_INT register and dispatch the reported intsn through the
 * domain registered for its major block.
 */
static void octeon_irq_ciu3_ip2(void)
{
	union cvmx_ciu3_destx_pp_int dest_pp_int;
	struct octeon_ciu3_info *ciu3_info;
	u64 ciu3_addr;

	ciu3_info = __this_cpu_read(octeon_ciu3_info);
	ciu3_addr = ciu3_info->ciu3_addr;

	/* Destinations are spaced 3 per core; slot 3*core serves IP2 (mbox uses 1 + 3*core). */
	dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(3 * cvmx_get_local_core_num()));

	if (likely(dest_pp_int.s.intr)) {
		irq_hw_number_t intsn = dest_pp_int.s.intsn;
		irq_hw_number_t hw;
		struct irq_domain *domain;
		/* Get the domain to use from the major block */
		int block = intsn >> 12;
		int ret;

		domain = ciu3_info->domain[block];
		/* Let the block translate intsn to its own hwirq space, if it has a hook. */
		if (ciu3_info->intsn2hw[block])
			hw = ciu3_info->intsn2hw[block](domain, intsn);
		else
			hw = intsn;

		ret = handle_domain_irq(domain, hw, NULL);
		if (ret < 0) {
			/* No mapping for this source: disable it so it cannot storm. */
			union cvmx_ciu3_iscx_w1c isc_w1c;
			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);

			isc_w1c.u64 = 0;
			isc_w1c.s.en = 1;
			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
			cvmx_read_csr(isc_w1c_addr);
			spurious_interrupt();
		}
	} else {
		spurious_interrupt();
	}
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* 10 mbox per core starting from zero.
|
|
|
|
* Base mbox is core * 10
|
|
|
|
*/
|
|
|
|
static unsigned int octeon_irq_ciu3_base_mbox_intsn(int core)
|
|
|
|
{
|
|
|
|
/* SW (mbox) are 0x04 in bits 12..19 */
|
|
|
|
return 0x04000 + CIU3_MBOX_PER_CORE * core;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* intsn of mailbox @mbox for local core @core. */
static unsigned int octeon_irq_ciu3_mbox_intsn_for_core(int core, unsigned int mbox)
{
	return mbox + octeon_irq_ciu3_base_mbox_intsn(core);
}
|
|
|
|
|
|
|
|
/* intsn of mailbox @mbox for logical @cpu. */
static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox)
{
	/* Only the node-local core number (low 6 bits of the coreid) is used. */
	int core = octeon_coreid_for_cpu(cpu) & 0x3f;

	return octeon_irq_ciu3_mbox_intsn_for_core(core, mbox);
}
|
|
|
|
|
|
|
|
/*
 * IP3 (mailbox) demultiplexer for CIU3: read this core's mailbox
 * destination register and dispatch the mailbox number as
 * OCTEON_IRQ_MBOX0 + mbox.  An intsn outside this core's mailbox range
 * is disabled and reported as spurious.
 */
static void octeon_irq_ciu3_mbox(void)
{
	union cvmx_ciu3_destx_pp_int dest_pp_int;
	struct octeon_ciu3_info *ciu3_info;
	u64 ciu3_addr;
	int core = cvmx_get_local_core_num();

	ciu3_info = __this_cpu_read(octeon_ciu3_info);
	ciu3_addr = ciu3_info->ciu3_addr;

	/* Destinations are spaced 3 per core; slot 1 + 3*core serves the mailboxes (IP3). */
	dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(1 + 3 * core));

	if (likely(dest_pp_int.s.intr)) {
		irq_hw_number_t intsn = dest_pp_int.s.intsn;
		int mbox = intsn - octeon_irq_ciu3_base_mbox_intsn(core);

		if (likely(mbox >= 0 && mbox < CIU3_MBOX_PER_CORE)) {
			do_IRQ(mbox + OCTEON_IRQ_MBOX0);
		} else {
			/* Unexpected intsn: disable the source so it cannot storm. */
			union cvmx_ciu3_iscx_w1c isc_w1c;
			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);

			isc_w1c.u64 = 0;
			isc_w1c.s.en = 1;
			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
			cvmx_read_csr(isc_w1c_addr);
			spurious_interrupt();
		}
	} else {
		spurious_interrupt();
	}
}
|
|
|
|
|
|
|
|
/*
 * Raise mailbox interrupt @mbox on @cpu by setting the RAW bit of the
 * target CPU's mailbox intsn via the W1S register.
 */
void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
{
	struct octeon_ciu3_info *ciu3_info;
	unsigned int intsn;
	union cvmx_ciu3_iscx_w1s isc_w1s;
	u64 isc_w1s_addr;

	if (WARN_ON_ONCE(mbox >= CIU3_MBOX_PER_CORE))
		return;

	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
	isc_w1s_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1S(intsn);

	isc_w1s.u64 = 0;
	isc_w1s.s.raw = 1;

	cvmx_write_csr(isc_w1s_addr, isc_w1s.u64);
	/* Read back so the write is posted -- pattern used throughout this driver. */
	cvmx_read_csr(isc_w1s_addr);
}
|
|
|
|
|
|
|
|
/*
 * Enable or disable mailbox (data->irq - OCTEON_IRQ_MBOX0) for @cpu:
 * always clears EN and zeroes ISC_CTL first, then, when enabling,
 * reprograms ISC_CTL to route via that CPU's IP3 IDT.
 */
static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en)
{
	struct octeon_ciu3_info *ciu3_info;
	unsigned int intsn;
	u64 isc_ctl_addr, isc_w1c_addr;
	union cvmx_ciu3_iscx_ctl isc_ctl;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
	isc_ctl_addr = ciu3_info->ciu3_addr + CIU3_ISC_CTL(intsn);

	isc_ctl.u64 = 0;
	isc_ctl.s.en = 1;

	/*
	 * NOTE(review): the ctl-union value is written to the W1C
	 * address here; this clears only EN if the EN bit occupies the
	 * same position in both register layouts -- verify against the
	 * cvmx-ciu3 definitions.
	 */
	cvmx_write_csr(isc_w1c_addr, isc_ctl.u64);
	cvmx_write_csr(isc_ctl_addr, 0);
	if (en) {
		unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);

		isc_ctl.u64 = 0;
		isc_ctl.s.en = 1;
		isc_ctl.s.idt = idt;
		cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
	}
	/* Read back so the writes are posted -- pattern used throughout this driver. */
	cvmx_read_csr(isc_ctl_addr);
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu3_mbox_enable(struct irq_data *data)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
|
|
|
|
|
|
|
|
WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
|
|
|
|
|
|
|
|
for_each_online_cpu(cpu)
|
|
|
|
octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void octeon_irq_ciu3_mbox_disable(struct irq_data *data)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
|
|
|
|
|
|
|
|
WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
|
|
|
|
|
|
|
|
for_each_online_cpu(cpu)
|
|
|
|
octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Acknowledge a mailbox interrupt on the local core by clearing the
 * RAW bit of this core's mailbox intsn.
 */
static void octeon_irq_ciu3_mbox_ack(struct irq_data *data)
{
	struct octeon_ciu3_info *ciu3_info;
	unsigned int intsn;
	u64 isc_w1c_addr;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	intsn = octeon_irq_ciu3_mbox_intsn_for_core(cvmx_get_local_core_num(), mbox);

	isc_w1c.u64 = 0;
	isc_w1c.s.raw = 1;

	ciu3_info = __this_cpu_read(octeon_ciu3_info);
	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	/* Read back so the write is posted -- pattern used throughout this driver. */
	cvmx_read_csr(isc_w1c_addr);
}
|
|
|
|
|
|
|
|
/* CPU-online hook: enable this mailbox interrupt on the incoming CPU. */
static void octeon_irq_ciu3_mbox_cpu_online(struct irq_data *data)
{
	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), true);
}
|
|
|
|
|
|
|
|
/* CPU-offline hook: disable this mailbox interrupt on the outgoing CPU. */
static void octeon_irq_ciu3_mbox_cpu_offline(struct irq_data *data)
{
	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), false);
}
|
|
|
|
|
|
|
|
/*
 * Per-CPU CIU3 setup: record this CPU's ciu3_info, carve out its four
 * interrupt delivery tables (IDTs), point them at this core, and reset
 * the per-core mailbox interrupt sources.  Always returns 0.
 */
static int octeon_irq_ciu3_alloc_resources(struct octeon_ciu3_info *ciu3_info)
{
	u64 b = ciu3_info->ciu3_addr;
	int idt_ip2, idt_ip3, idt_ip4;
	int unused_idt2;
	int core = cvmx_get_local_core_num();
	int i;

	__this_cpu_write(octeon_ciu3_info, ciu3_info);

	/*
	 * 4 idt per core starting from 1 because zero is reserved.
	 * Base idt per core is 4 * core + 1
	 */
	idt_ip2 = core * 4 + 1;
	idt_ip3 = core * 4 + 2;
	idt_ip4 = core * 4 + 3;
	unused_idt2 = core * 4 + 4;
	__this_cpu_write(octeon_irq_ciu3_idt_ip2, idt_ip2);
	__this_cpu_write(octeon_irq_ciu3_idt_ip3, idt_ip3);

	/* ip2 interrupts for this CPU */
	/* IDT_CTL values 0/1/2 select the CPU irq line; presumably IP2/IP3/IP4 -- verify. */
	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip2), 0);
	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip2, 0), 1ull << core);
	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip2), 0);

	/* ip3 interrupts for this CPU */
	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip3), 1);
	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip3, 0), 1ull << core);
	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip3), 0);

	/* ip4 interrupts for this CPU */
	/* PP mask left 0: no core targeted until IP4 use is configured. */
	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip4), 2);
	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip4, 0), 0);
	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip4), 0);

	/* Park the spare IDT: no irq line, no cores, no I/O. */
	cvmx_write_csr(b + CIU3_IDT_CTL(unused_idt2), 0);
	cvmx_write_csr(b + CIU3_IDT_PP(unused_idt2, 0), 0);
	cvmx_write_csr(b + CIU3_IDT_IO(unused_idt2), 0);

	/* Reset this core's mailbox sources. */
	for (i = 0; i < CIU3_MBOX_PER_CORE; i++) {
		unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(core, i);

		/* NOTE(review): magic 2 written to W1C -- presumably the EN bit; verify layout. */
		cvmx_write_csr(b + CIU3_ISC_W1C(intsn), 2);
		cvmx_write_csr(b + CIU3_ISC_CTL(intsn), 0);
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Secondary-CPU interrupt bring-up for CIU3 systems: allocate this
 * CPU's IDTs, run the online hooks, and unmask the CP0 interrupt lines.
 */
static void octeon_irq_setup_secondary_ciu3(void)
{
	struct octeon_ciu3_info *ciu3_info;

	ciu3_info = octeon_ciu3_info_per_node[cvmx_get_node_num()];
	octeon_irq_ciu3_alloc_resources(ciu3_info);
	irq_cpu_online();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}
|
|
|
|
|
|
|
|
/*
 * irq_chip for the per-CPU CIU3 mailbox interrupts.
 * IRQCHIP_ONOFFLINE_ENABLED: the cpu_online/cpu_offline hooks run even
 * for enabled interrupts.
 */
static struct irq_chip octeon_irq_chip_ciu3_mbox = {
	.name = "CIU3-M",
	.irq_enable = octeon_irq_ciu3_mbox_enable,
	.irq_disable = octeon_irq_ciu3_mbox_disable,
	.irq_ack = octeon_irq_ciu3_mbox_ack,

	.irq_cpu_online = octeon_irq_ciu3_mbox_cpu_online,
	.irq_cpu_offline = octeon_irq_ciu3_mbox_cpu_offline,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
|
|
|
|
|
|
|
|
/*
 * Probe a CIU3 interrupt controller from the devicetree: map its
 * registers, install the IP2/IP3/IP4 dispatch hooks, create the default
 * irq domain for all major blocks, and (on the boot node) do the
 * per-CPU setup and unmask the CP0 lines.
 *
 * Fixes vs. original: ciu3_info was leaked when of_get_address()
 * failed, and the irq_domain_add_tree() result was not checked.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
				       struct device_node *parent)
{
	int i;
	int node;
	struct irq_domain *domain;
	struct octeon_ciu3_info *ciu3_info;
	const __be32 *zero_addr;
	u64 base_addr;
	union cvmx_ciu3_const consts;

	node = 0; /* of_node_to_nid(ciu_node); */
	ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);

	if (!ciu3_info)
		return -ENOMEM;

	zero_addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (WARN_ON(!zero_addr)) {
		kfree(ciu3_info);
		return -EINVAL;
	}

	base_addr = of_translate_address(ciu_node, zero_addr);
	base_addr = (u64)phys_to_virt(base_addr);

	ciu3_info->ciu3_addr = base_addr;
	ciu3_info->node = node;

	/* NOTE(review): value read but not used below; kept for the CSR access itself. */
	consts.u64 = cvmx_read_csr(base_addr + CIU3_CONST);

	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu3;

	octeon_irq_ip2 = octeon_irq_ciu3_ip2;
	octeon_irq_ip3 = octeon_irq_ciu3_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	if (node == cvmx_get_node_num()) {
		/* Mips internal */
		octeon_irq_init_core();

		/*
		 * Only do per CPU things if it is the CIU of the boot node.
		 * Only 8 of the CIU3_MBOX_PER_CORE mailboxes get Linux irqs.
		 */
		i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node);
		WARN_ON(i < 0);

		for (i = 0; i < 8; i++)
			irq_set_chip_and_handler(i + OCTEON_IRQ_MBOX0,
				&octeon_irq_chip_ciu3_mbox, handle_percpu_irq);
	}

	/*
	 * Initialize all domains to use the default domain. Specific major
	 * blocks will overwrite the default domain as needed.
	 */
	domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
				     ciu3_info);
	if (!domain) {
		kfree(ciu3_info);
		return -ENOMEM;
	}
	for (i = 0; i < MAX_CIU3_DOMAINS; i++)
		ciu3_info->domain[i] = domain;

	octeon_ciu3_info_per_node[node] = ciu3_info;

	if (node == cvmx_get_node_num()) {
		/* Only do per CPU things if it is the CIU of the boot node. */
		octeon_irq_ciu3_alloc_resources(ciu3_info);
		if (node == 0)
			irq_set_default_host(domain);

		octeon_irq_use_ip4 = false;
		/* Enable the CIU lines */
		set_c0_status(STATUSF_IP2 | STATUSF_IP3);
		clear_c0_status(STATUSF_IP4);
	}

	return 0;
}
|
|
|
|
|
2015-01-15 13:11:19 +00:00
|
|
|
static struct of_device_id ciu_types[] __initdata = {
|
|
|
|
{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
|
|
|
|
{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
|
|
|
|
{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
|
2016-02-09 19:00:11 +00:00
|
|
|
{.compatible = "cavium,octeon-7890-ciu3", .data = octeon_irq_init_ciu3},
|
2015-01-15 13:11:19 +00:00
|
|
|
{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
|
|
|
|
{}
|
|
|
|
};
|
|
|
|
|
2009-01-09 00:46:40 +00:00
|
|
|
/* Arch entry point: probe whichever CIU variant(s) the devicetree describes. */
void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	of_irq_init(ciu_types);
}
|
|
|
|
|
|
|
|
/*
 * Top-level MIPS interrupt dispatch: loop until no enabled cause bits
 * remain, servicing IP2 (devices) first, then IP3 (mailbox), then IP4.
 */
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		/* Only lines that are both pending and unmasked. */
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (cop0_cause & STATUSF_IP2)
			octeon_irq_ip2();
		else if (cop0_cause & STATUSF_IP3)
			octeon_irq_ip3();
		else if (cop0_cause & STATUSF_IP4)
			octeon_irq_ip4();
		else if (cop0_cause)
			/* Remaining CPU-internal lines: highest pending IP wins. */
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}
|
2009-06-23 09:36:38 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
|
|
|
2013-09-03 16:19:28 +00:00
|
|
|
void octeon_fixup_irqs(void)
|
2009-06-23 09:36:38 +00:00
|
|
|
{
|
2011-03-25 19:38:51 +00:00
|
|
|
irq_cpu_offline();
|
2009-06-23 09:36:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_HOTPLUG_CPU */
|