Mirror of https://gitee.com/bianbu-linux/linux-6.6
Updates for the interrupt subsystem:

Core:

 - Convert the interrupt descriptor storage to a maple tree to overcome
   the limitations of the radix tree + fixed-size bitmap. This allows us
   to handle very large servers with a huge number of guests without
   imposing a huge memory overhead on everyone.

 - Implement optional retriggering of interrupts which utilize the
   fasteoi handler, to work around a GICv3 architecture issue.

Drivers:

 - A set of fixes and updates for the Loongson/LoongArch related
   drivers.

 - Workaround for an ASR8601 integration hiccup which ends up with CPU
   numbering that can't be represented in the GIC implementation.

 - The usual set of boring fixes and updates all over the place.

-----BEGIN PGP SIGNATURE-----

iQJHBAABCgAxFiEEQp8+kY+LLUocC4bMphj1TA10mKEFAmSZaf0THHRnbHhAbGlu
dXRyb25peC5kZQAKCRCmGPVMDXSYoUgaD/9PwYvqeR12oJRz24gso6NNxlZ2nZMh
KIApeIV4eoDPjM9Qdc38Tz+LbiClZuhiNRmxqzkaKmLsNObeYJhNvRg14bQA/Mfy
t1kqO2rlNTSeRR5Y0XiQqFMIKCcpMQeKXzJ+ZQspiX08kCSl9UqBKpE5HgbTVFiB
yTwdtagi8zrDr8KuETe+REKcwvoLippHrnz6evVMOXtN6Jdtz2maZT9dVDAvaVl7
pXgarzMScEFTfK8Q6wjH9ayC1UXPmSIIiirWZHYvtaAXh4/IY1U1LY4KqkVPQ1MB
7thv4CbE/Iyzw78FUMtrsMwqOV/fu71SfBh9uV6kFxoySFJ/gJ8QLOcAqkbNGyBf
9oRWuuY0LJZl1AKtmU6jNaS17JeOpdIdB44cAXBArYMbJUStZ2Mo2EDdw+/IHNzM
tt32+Pjtg8BVrFLcR7gQ5rzAktz6678x9Qk6ys+KUCG3tuFyKx6RiD+f0DARe1Td
DflNoJ6WTqwoimvTokAg6QGPUyHKJLe29ciSuUjHXaHJAE9xyeGtfJQWNLwpjejD
KYYo5mb8cJc917Yx8LUOj02jVtebQtLezDtnUyGXrIR+ze4ZUQxhgvSKRDxX7E56
CjG3ghx6Ty1sTpjL4dHtXLJ1NgitFyjJ7VQlVqxWNQBNI+m3l2zmxj4zB9eI6v1R
qyjKEgnFi60vSw==
=qKCo
-----END PGP SIGNATURE-----

Merge tag 'irq-core-2023-06-26' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner (tag message as above).

* tag 'irq-core-2023-06-26' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  Revert "irqchip/mxs: Include linux/irqchip/mxs.h"
  irqchip/jcore-aic: Fix missing allocation of IRQ descriptors
  irqchip/stm32-exti: Fix warning on initialized field overwritten
  irqchip/stm32-exti: Add STM32MP15xx IWDG2 EXTI to GIC map
  irqchip/gicv3: Add a iort_pmsi_get_dev_id() prototype
  irqchip/mxs: Include linux/irqchip/mxs.h
  irqchip/clps711x: Remove unused clps711x_intc_init() function
  irqchip/mmp: Remove non-DT codepath
  irqchip/ftintc010: Mark all function static
  irqdomain: Include internals.h for function prototypes
  irqchip/loongson-eiointc: Add DT init support
  dt-bindings: interrupt-controller: Add Loongson EIOINTC
  irqchip/loongson-eiointc: Fix irq affinity setting during resume
  irqchip/loongson-liointc: Add IRQCHIP_SKIP_SET_WAKE flag
  irqchip/loongson-liointc: Fix IRQ trigger polarity
  irqchip/loongson-pch-pic: Fix potential incorrect hwirq assignment
  irqchip/loongson-pch-pic: Fix initialization of HT vector register
  irqchip/gic-v3-its: Enable RESEND_WHEN_IN_PROGRESS for LPIs
  genirq: Allow fasteoi handler to resend interrupts on concurrent handling
  genirq: Expand doc for PENDING and REPLAY flags
  ...
commit 0017387938

23 changed files with 383 additions and 296 deletions
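Before the per-file hunks, a condensed view of what the maple tree conversion amounts to. This is not a buildable unit, just the new kernel/irq descriptor pieces from the irqdesc.c hunks further down, pulled together with comments (kernel context assumed):

/* One tree replaces the radix tree plus the fixed-size allocated_irqs bitmap. */
static struct maple_tree sparse_irqs = MTREE_INIT_EXT(sparse_irqs,
					MT_FLAGS_ALLOC_RANGE |	/* gap tracking for range allocation */
					MT_FLAGS_LOCK_EXTERN |	/* serialized by sparse_irq_lock */
					MT_FLAGS_USE_RCU,	/* lockless readers in irq_to_desc() */
					sparse_irq_lock);

/* Lookup becomes a single RCU-safe load: */
struct irq_desc *irq_to_desc(unsigned int irq)
{
	return mtree_load(&sparse_irqs, irq);
}

/*
 * Allocation searches the tree itself for a free range of cnt IRQs, so the
 * old bitmap sized to NR_IRQS + 8196 bits goes away and the upper bound
 * becomes MAX_SPARSE_IRQS (INT_MAX):
 */
static int irq_find_free_area(unsigned int from, unsigned int cnt)
{
	MA_STATE(mas, &sparse_irqs, 0, 0);

	if (mas_empty_area(&mas, from, MAX_SPARSE_IRQS, cnt))
		return -ENOSPC;
	return mas.index;
}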

@@ -214,3 +214,7 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Fujitsu        | A64FX           | E#010001        | FUJITSU_ERRATUM_010001      |
 +----------------+-----------------+-----------------+-----------------------------+
+
++----------------+-----------------+-----------------+-----------------------------+
+| ASR            | ASR8601         | #8601001        | N/A                         |
++----------------+-----------------+-----------------+-----------------------------+

@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/loongson,eiointc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson Extended I/O Interrupt Controller
+
+maintainers:
+  - Binbin Zhou <zhoubinbin@loongson.cn>
+
+description: |
+  This interrupt controller is found on the Loongson-3 family chips and
+  Loongson-2K series chips and is used to distribute interrupts directly to
+  individual cores without forwarding them through the HT's interrupt line.
+
+allOf:
+  - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+  compatible:
+    enum:
+      - loongson,ls2k0500-eiointc
+      - loongson,ls2k2000-eiointc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  interrupt-controller: true
+
+  '#interrupt-cells':
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - interrupt-controller
+  - '#interrupt-cells'
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    eiointc: interrupt-controller@1fe11600 {
+      compatible = "loongson,ls2k0500-eiointc";
+      reg = <0x1fe10000 0x10000>;
+
+      interrupt-controller;
+      #interrupt-cells = <1>;
+
+      interrupt-parent = <&cpuintc>;
+      interrupts = <3>;
+    };
+
+...

@@ -212,12 +212,6 @@ out_kfree:
 	return err;
 }
 
-void __init clps711x_intc_init(phys_addr_t base, resource_size_t size)
-{
-	BUG_ON(_clps711x_intc_init(NULL, base, size));
-}
-
-#ifdef CONFIG_IRQCHIP
 static int __init clps711x_intc_init_dt(struct device_node *np,
 					struct device_node *parent)
 {
@@ -231,4 +225,3 @@ static int __init clps711x_intc_init_dt(struct device_node *np,
 	return _clps711x_intc_init(np, res.start, resource_size(&res));
 }
 IRQCHIP_DECLARE(clps711x, "cirrus,ep7209-intc", clps711x_intc_init_dt);
-#endif

@@ -125,7 +125,7 @@ static struct irq_chip ft010_irq_chip = {
 /* Local static for the IRQ entry call */
 static struct ft010_irq_data firq;
 
-asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs)
+static asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs)
 {
 	struct ft010_irq_data *f = &firq;
 	int irq;
@@ -162,7 +162,7 @@ static const struct irq_domain_ops ft010_irqdomain_ops = {
 	.xlate = irq_domain_xlate_onetwocell,
 };
 
-int __init ft010_of_init_irq(struct device_node *node,
+static int __init ft010_of_init_irq(struct device_node *node,
 			     struct device_node *parent)
 {
 	struct ft010_irq_data *f = &firq;

@@ -3585,6 +3585,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 		irqd = irq_get_irq_data(virq + i);
 		irqd_set_single_target(irqd);
 		irqd_set_affinity_on_activate(irqd);
+		irqd_set_resend_when_in_progress(irqd);
 		pr_debug("ID:%d pID:%d vID:%d\n",
 			 (int)(hwirq + i - its_dev->event_map.lpi_base),
 			 (int)(hwirq + i), virq + i);
@@ -4523,6 +4524,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
 		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
 					      irqchip, vm->vpes[i]);
 		set_bit(i, bitmap);
+		irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
 	}
 
 	if (err) {

@@ -40,6 +40,7 @@
 #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
 #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)
 #define FLAGS_WORKAROUND_MTK_GICR_SAVE		(1ULL << 2)
+#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001	(1ULL << 3)
 
 #define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)
 
@@ -656,10 +657,16 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 	return 0;
 }
 
-static u64 gic_mpidr_to_affinity(unsigned long mpidr)
+static u64 gic_cpu_to_affinity(int cpu)
 {
+	u64 mpidr = cpu_logical_map(cpu);
 	u64 aff;
 
+	/* ASR8601 needs to have its affinities shifted down... */
+	if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001))
+		mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) |
+			 (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8));
+
 	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
 	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
 	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
@@ -914,7 +921,7 @@ static void __init gic_dist_init(void)
 	 * Set all global interrupts to the boot CPU only. ARE must be
 	 * enabled.
 	 */
-	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
+	affinity = gic_cpu_to_affinity(smp_processor_id());
 	for (i = 32; i < GIC_LINE_NR; i++)
 		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
 
@@ -963,7 +970,7 @@ static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
 
 static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
 {
-	unsigned long mpidr = cpu_logical_map(smp_processor_id());
+	unsigned long mpidr;
 	u64 typer;
 	u32 aff;
 
@@ -971,6 +978,8 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
 	 * Convert affinity to a 32bit value that can be matched to
 	 * GICR_TYPER bits [63:32].
 	 */
+	mpidr = gic_cpu_to_affinity(smp_processor_id());
+
 	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
 	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
 	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
@@ -1084,7 +1093,7 @@ static inline bool gic_dist_security_disabled(void)
 static void gic_cpu_sys_reg_init(void)
 {
 	int i, cpu = smp_processor_id();
-	u64 mpidr = cpu_logical_map(cpu);
+	u64 mpidr = gic_cpu_to_affinity(cpu);
 	u64 need_rss = MPIDR_RS(mpidr);
 	bool group0;
 	u32 pribits;
@@ -1183,11 +1192,11 @@ static void gic_cpu_sys_reg_init(void)
 	for_each_online_cpu(i) {
 		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
 
-		need_rss |= MPIDR_RS(cpu_logical_map(i));
+		need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
 		if (need_rss && (!have_rss))
 			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
 				cpu, (unsigned long)mpidr,
-				i, (unsigned long)cpu_logical_map(i));
+				i, (unsigned long)gic_cpu_to_affinity(i));
 	}
 
 	/**
@@ -1263,9 +1272,11 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 				   unsigned long cluster_id)
 {
 	int next_cpu, cpu = *base_cpu;
-	unsigned long mpidr = cpu_logical_map(cpu);
+	unsigned long mpidr;
 	u16 tlist = 0;
 
+	mpidr = gic_cpu_to_affinity(cpu);
+
 	while (cpu < nr_cpu_ids) {
 		tlist |= 1 << (mpidr & 0xf);
 
@@ -1274,7 +1285,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 			goto out;
 		cpu = next_cpu;
 
-		mpidr = cpu_logical_map(cpu);
+		mpidr = gic_cpu_to_affinity(cpu);
 
 		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
 			cpu--;
@@ -1319,7 +1330,7 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
 	dsb(ishst);
 
 	for_each_cpu(cpu, mask) {
-		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
+		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
 		u16 tlist;
 
 		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
@@ -1377,7 +1388,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 
 	offset = convert_offset_index(d, GICD_IROUTER, &index);
 	reg = gic_dist_base(d) + offset + (index * 8);
-	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
+	val = gic_cpu_to_affinity(cpu);
 
 	gic_write_irouter(val, reg);
 
@@ -1796,12 +1807,26 @@ static bool gic_enable_quirk_nvidia_t241(void *data)
 	return true;
 }
 
+static bool gic_enable_quirk_asr8601(void *data)
+{
+	struct gic_chip_data *d = data;
+
+	d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001;
+
+	return true;
+}
+
 static const struct gic_quirk gic_quirks[] = {
 	{
 		.desc	= "GICv3: Qualcomm MSM8996 broken firmware",
 		.compatible = "qcom,msm8996-gic-v3",
 		.init	= gic_enable_quirk_msm8996,
 	},
+	{
+		.desc	= "GICv3: ASR erratum 8601001",
+		.compatible = "asr,asr8601-gic-v3",
+		.init	= gic_enable_quirk_asr8601,
+	},
 	{
 		.desc	= "GICv3: Mediatek Chromebook GICR save problem",
 		.property = "mediatek,broken-save-restore-fw",
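
To see what the ASR8601 fixup in gic_cpu_to_affinity() above actually does to an MPIDR value, here is a small standalone C program. The MPIDR_AFFINITY_LEVEL macro is re-derived locally for the demo (field widths per the ARMv8 MPIDR layout, Aff3 at bits [39:32]); the example MPIDR value is invented:

#include <stdio.h>
#include <stdint.h>

/* Local re-derivation of the kernel macros, for this demo only. */
#define MPIDR_LEVEL_SHIFT(level)	((((1 << (level)) >> 1) << 3))	/* 0, 8, 16, 32 */
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> MPIDR_LEVEL_SHIFT(level)) & 0xff)

/*
 * Mirror of the quirk: the SoC reports cluster/core one affinity level
 * higher than the GIC can route, so Aff2/Aff1 are shifted down to Aff1/Aff0
 * before the router value is built.
 */
static uint64_t asr8601_fix(uint64_t mpidr)
{
	return MPIDR_AFFINITY_LEVEL(mpidr, 1) |
	       (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8);
}

/* Same packing as the aff = ... expression in gic_cpu_to_affinity(). */
static uint64_t gic_affinity(uint64_t mpidr)
{
	return ((uint64_t)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32) |
	       (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16) |
	       (MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8) |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0);
}

int main(void)
{
	uint64_t mpidr = 0x00010200;	/* hypothetical: Aff2=1, Aff1=2, Aff0=0 */

	printf("plain  : %#llx\n", (unsigned long long)gic_affinity(mpidr));
	printf("asr8601: %#llx\n", (unsigned long long)gic_affinity(asr8601_fix(mpidr)));
	return 0;
}

Running this prints 0x10200 for the plain packing and 0x102 after the fixup, i.e. the same topology expressed one level lower, which is what the GIC on that SoC expects.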

@@ -68,6 +68,7 @@ static int __init aic_irq_of_init(struct device_node *node,
 	unsigned min_irq = JCORE_AIC2_MIN_HWIRQ;
 	unsigned dom_sz = JCORE_AIC_MAX_HWIRQ+1;
 	struct irq_domain *domain;
+	int ret;
 
 	pr_info("Initializing J-Core AIC\n");
 
@@ -100,6 +101,12 @@ static int __init aic_irq_of_init(struct device_node *node,
 	jcore_aic.irq_unmask = noop;
 	jcore_aic.name = "AIC";
 
+	ret = irq_alloc_descs(-1, min_irq, dom_sz - min_irq,
+			      of_node_to_nid(node));
+
+	if (ret < 0)
+		return ret;
+
 	domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq,
 				       &jcore_aic_irqdomain_ops,
 				       &jcore_aic);

@@ -36,6 +36,7 @@ static int nr_pics;
 
 struct eiointc_priv {
 	u32 node;
+	u32 vec_count;
 	nodemask_t node_map;
 	cpumask_t cpuspan_map;
 	struct fwnode_handle *domain_handle;
@@ -153,18 +154,18 @@ static int eiointc_router_init(unsigned int cpu)
 	if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
 		eiointc_enable();
 
-		for (i = 0; i < VEC_COUNT / 32; i++) {
+		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
 			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
 			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
 		}
 
-		for (i = 0; i < VEC_COUNT / 32 / 4; i++) {
+		for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) {
 			bit = BIT(1 + index); /* Route to IP[1 + index] */
 			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
 			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
 		}
 
-		for (i = 0; i < VEC_COUNT / 4; i++) {
+		for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) {
 			/* Route to Node-0 Core-0 */
 			if (index == 0)
 				bit = BIT(cpu_logical_map(0));
@@ -175,7 +176,7 @@ static int eiointc_router_init(unsigned int cpu)
 			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
 		}
 
-		for (i = 0; i < VEC_COUNT / 32; i++) {
+		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
 			data = 0xffffffff;
 			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
 			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
@@ -195,7 +196,7 @@ static void eiointc_irq_dispatch(struct irq_desc *desc)
 
 	chained_irq_enter(chip, desc);
 
-	for (i = 0; i < VEC_REG_COUNT; i++) {
+	for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
 		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
 		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
 		while (pending) {
@@ -310,11 +311,11 @@ static void eiointc_resume(void)
 	eiointc_router_init(0);
 
 	for (i = 0; i < nr_pics; i++) {
-		for (j = 0; j < VEC_COUNT; j++) {
+		for (j = 0; j < eiointc_priv[0]->vec_count; j++) {
 			desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j);
 			if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) {
 				raw_spin_lock(&desc->lock);
-				irq_data = &desc->irq_data;
+				irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc));
 				eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0);
 				raw_spin_unlock(&desc->lock);
 			}
@@ -375,11 +376,47 @@ static int __init acpi_cascade_irqdomain_init(void)
 	return 0;
 }
 
+static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
+			       u64 node_map)
+{
+	int i;
+
+	node_map = node_map ? node_map : -1ULL;
+	for_each_possible_cpu(i) {
+		if (node_map & (1ULL << (cpu_to_eio_node(i)))) {
+			node_set(cpu_to_eio_node(i), priv->node_map);
+			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map,
+				   cpumask_of(i));
+		}
+	}
+
+	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle,
+							priv->vec_count,
+							&eiointc_domain_ops,
+							priv);
+	if (!priv->eiointc_domain) {
+		pr_err("loongson-extioi: cannot add IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	eiointc_priv[nr_pics++] = priv;
+	eiointc_router_init(0);
+	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
+
+	if (nr_pics == 1) {
+		register_syscore_ops(&eiointc_syscore_ops);
+		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
+					  "irqchip/loongarch/intc:starting",
+					  eiointc_router_init, NULL);
+	}
+
+	return 0;
+}
+
 int __init eiointc_acpi_init(struct irq_domain *parent,
 			     struct acpi_madt_eio_pic *acpi_eiointc)
 {
-	int i, ret, parent_irq;
-	unsigned long node_map;
+	int parent_irq, ret;
 	struct eiointc_priv *priv;
 	int node;
 
@@ -394,37 +431,14 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
 		goto out_free_priv;
 	}
 
+	priv->vec_count = VEC_COUNT;
 	priv->node = acpi_eiointc->node;
-	node_map = acpi_eiointc->node_map ? : -1ULL;
-
-	for_each_possible_cpu(i) {
-		if (node_map & (1ULL << cpu_to_eio_node(i))) {
-			node_set(cpu_to_eio_node(i), priv->node_map);
-			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i));
-		}
-	}
-
-	/* Setup IRQ domain */
-	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT,
-							&eiointc_domain_ops, priv);
-	if (!priv->eiointc_domain) {
-		pr_err("loongson-eiointc: cannot add IRQ domain\n");
-		goto out_free_handle;
-	}
-
-	eiointc_priv[nr_pics++] = priv;
-
-	eiointc_router_init(0);
 
 	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
-	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
 
-	if (nr_pics == 1) {
-		register_syscore_ops(&eiointc_syscore_ops);
-		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
-					  "irqchip/loongarch/intc:starting",
-					  eiointc_router_init, NULL);
-	}
+	ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map);
+	if (ret < 0)
+		goto out_free_handle;
 
 	if (cpu_has_flatmode)
 		node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
@@ -432,7 +446,10 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
 		node = acpi_eiointc->node;
 	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
 	acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);
+
 	ret = acpi_cascade_irqdomain_init();
+	if (ret < 0)
+		goto out_free_handle;
 
 	return ret;
 
@@ -444,3 +461,49 @@ out_free_priv:
 
 	return -ENOMEM;
 }
+
+static int __init eiointc_of_init(struct device_node *of_node,
+				  struct device_node *parent)
+{
+	int parent_irq, ret;
+	struct eiointc_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	parent_irq = irq_of_parse_and_map(of_node, 0);
+	if (parent_irq <= 0) {
+		ret = -ENODEV;
+		goto out_free_priv;
+	}
+
+	ret = irq_set_handler_data(parent_irq, priv);
+	if (ret < 0)
+		goto out_free_priv;
+
+	/*
+	 * In particular, the number of devices supported by the LS2K0500
+	 * extended I/O interrupt vector is 128.
+	 */
+	if (of_device_is_compatible(of_node, "loongson,ls2k0500-eiointc"))
+		priv->vec_count = 128;
+	else
+		priv->vec_count = VEC_COUNT;
+
+	priv->node = 0;
+	priv->domain_handle = of_node_to_fwnode(of_node);
+
+	ret = eiointc_init(priv, parent_irq, 0);
+	if (ret < 0)
+		goto out_free_priv;
+
+	return 0;
+
+out_free_priv:
+	kfree(priv);
+	return ret;
+}
+
+IRQCHIP_DECLARE(loongson_ls2k0500_eiointc, "loongson,ls2k0500-eiointc", eiointc_of_init);
+IRQCHIP_DECLARE(loongson_ls2k2000_eiointc, "loongson,ls2k2000-eiointc", eiointc_of_init);
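
The DT init path above sizes everything from priv->vec_count instead of the compile-time VEC_COUNT, so an LS2K0500 with its 128 vectors programs half as many NODEMAP/ENABLE/BOUNCE words as a 256-vector part. A quick standalone check of the loop bounds used by eiointc_router_init() and eiointc_irq_dispatch() (VEC_COUNT_PER_REG assumed to be 64 and the default VEC_COUNT 256, matching the driver's constants):

#include <stdio.h>

#define VEC_COUNT_PER_REG 64	/* assumed, as in the driver */

static void show(const char *name, unsigned int vec_count)
{
	printf("%s: nodemap/enable words=%u, ipmap words=%u, route words=%u, isr regs=%u\n",
	       name,
	       vec_count / 32,			/* EIOINTC_REG_NODEMAP / _ENABLE / _BOUNCE */
	       vec_count / 32 / 4,		/* EIOINTC_REG_IPMAP */
	       vec_count / 4,			/* EIOINTC_REG_ROUTE */
	       vec_count / VEC_COUNT_PER_REG);	/* EIOINTC_REG_ISR, 64 bits each */
}

int main(void)
{
	show("ls2k0500 (128 vectors)", 128);
	show("default  (256 vectors)", 256);
	return 0;
}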

@@ -32,6 +32,10 @@
 #define LIOINTC_REG_INTC_EN_STATUS	(LIOINTC_INTC_CHIP_START + 0x04)
 #define LIOINTC_REG_INTC_ENABLE	(LIOINTC_INTC_CHIP_START + 0x08)
 #define LIOINTC_REG_INTC_DISABLE	(LIOINTC_INTC_CHIP_START + 0x0c)
+/*
+ * LIOINTC_REG_INTC_POL register is only valid for Loongson-2K series, and
+ * Loongson-3 series behave as noops.
+ */
 #define LIOINTC_REG_INTC_POL	(LIOINTC_INTC_CHIP_START + 0x10)
 #define LIOINTC_REG_INTC_EDGE	(LIOINTC_INTC_CHIP_START + 0x14)
 
@@ -116,19 +120,19 @@ static int liointc_set_type(struct irq_data *data, unsigned int type)
 	switch (type) {
 	case IRQ_TYPE_LEVEL_HIGH:
 		liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
-		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
+		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
 		break;
 	case IRQ_TYPE_LEVEL_LOW:
 		liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
-		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
+		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
 		break;
 	case IRQ_TYPE_EDGE_RISING:
 		liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
-		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
+		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
 		break;
 	case IRQ_TYPE_EDGE_FALLING:
 		liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
-		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
+		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
 		break;
 	default:
 		irq_gc_unlock_irqrestore(gc, flags);
@@ -291,6 +295,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
 	ct->chip.irq_mask = irq_gc_mask_disable_reg;
 	ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
 	ct->chip.irq_set_type = liointc_set_type;
+	ct->chip.flags = IRQCHIP_SKIP_SET_WAKE;
 
 	gc->mask_cache = 0;
 	priv->gc = gc;

@@ -164,7 +164,7 @@ static int pch_pic_domain_translate(struct irq_domain *d,
 		if (fwspec->param_count < 2)
 			return -EINVAL;
 
-		*hwirq = fwspec->param[0] + priv->ht_vec_base;
+		*hwirq = fwspec->param[0];
 		*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
 	} else {
 		if (fwspec->param_count < 1)
@@ -196,7 +196,7 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
 
 	parent_fwspec.fwnode = domain->parent->fwnode;
 	parent_fwspec.param_count = 1;
-	parent_fwspec.param[0] = hwirq;
+	parent_fwspec.param[0] = hwirq + priv->ht_vec_base;
 
 	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
 	if (err)
@@ -401,14 +401,12 @@ static int __init acpi_cascade_irqdomain_init(void)
 int __init pch_pic_acpi_init(struct irq_domain *parent,
 			     struct acpi_madt_bio_pic *acpi_pchpic)
 {
-	int ret, vec_base;
+	int ret;
 	struct fwnode_handle *domain_handle;
 
 	if (find_pch_pic(acpi_pchpic->gsi_base) >= 0)
 		return 0;
 
-	vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;
-
 	domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
 	if (!domain_handle) {
 		pr_err("Unable to allocate domain handle\n");
@@ -416,7 +414,7 @@ int __init pch_pic_acpi_init(struct irq_domain *parent,
 	}
 
 	ret = pch_pic_init(acpi_pchpic->address, acpi_pchpic->size,
-			   vec_base, parent, domain_handle, acpi_pchpic->gsi_base);
+			   0, parent, domain_handle, acpi_pchpic->gsi_base);
 
 	if (ret < 0) {
 		irq_domain_free_fwnode(domain_handle);

@@ -244,132 +244,6 @@ static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
 	generic_handle_domain_irq(icu_data[0].domain, hwirq);
 }
 
-/* MMP (ARMv5) */
-void __init icu_init_irq(void)
-{
-	int irq;
-
-	max_icu_nr = 1;
-	mmp_icu_base = ioremap(0xd4282000, 0x1000);
-	icu_data[0].conf_enable = mmp_conf.conf_enable;
-	icu_data[0].conf_disable = mmp_conf.conf_disable;
-	icu_data[0].conf_mask = mmp_conf.conf_mask;
-	icu_data[0].nr_irqs = 64;
-	icu_data[0].virq_base = 0;
-	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
-						   &irq_domain_simple_ops,
-						   &icu_data[0]);
-	for (irq = 0; irq < 64; irq++) {
-		icu_mask_irq(irq_get_irq_data(irq));
-		irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
-	}
-	irq_set_default_host(icu_data[0].domain);
-	set_handle_irq(mmp_handle_irq);
-}
-
-/* MMP2 (ARMv7) */
-void __init mmp2_init_icu(void)
-{
-	int irq, end;
-
-	max_icu_nr = 8;
-	mmp_icu_base = ioremap(0xd4282000, 0x1000);
-	icu_data[0].conf_enable = mmp2_conf.conf_enable;
-	icu_data[0].conf_disable = mmp2_conf.conf_disable;
-	icu_data[0].conf_mask = mmp2_conf.conf_mask;
-	icu_data[0].nr_irqs = 64;
-	icu_data[0].virq_base = 0;
-	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
-						   &irq_domain_simple_ops,
-						   &icu_data[0]);
-	icu_data[1].reg_status = mmp_icu_base + 0x150;
-	icu_data[1].reg_mask = mmp_icu_base + 0x168;
-	icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base +
-				       icu_data[0].nr_irqs;
-	icu_data[1].clr_mfp_hwirq = 1;		/* offset to IRQ_MMP2_PMIC_BASE */
-	icu_data[1].nr_irqs = 2;
-	icu_data[1].cascade_irq = 4;
-	icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs;
-	icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
-						   icu_data[1].virq_base, 0,
-						   &irq_domain_simple_ops,
-						   &icu_data[1]);
-	icu_data[2].reg_status = mmp_icu_base + 0x154;
-	icu_data[2].reg_mask = mmp_icu_base + 0x16c;
-	icu_data[2].nr_irqs = 2;
-	icu_data[2].cascade_irq = 5;
-	icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs;
-	icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
-						   icu_data[2].virq_base, 0,
-						   &irq_domain_simple_ops,
-						   &icu_data[2]);
-	icu_data[3].reg_status = mmp_icu_base + 0x180;
-	icu_data[3].reg_mask = mmp_icu_base + 0x17c;
-	icu_data[3].nr_irqs = 3;
-	icu_data[3].cascade_irq = 9;
-	icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs;
-	icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
-						   icu_data[3].virq_base, 0,
-						   &irq_domain_simple_ops,
-						   &icu_data[3]);
-	icu_data[4].reg_status = mmp_icu_base + 0x158;
-	icu_data[4].reg_mask = mmp_icu_base + 0x170;
-	icu_data[4].nr_irqs = 5;
-	icu_data[4].cascade_irq = 17;
-	icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs;
-	icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
-						   icu_data[4].virq_base, 0,
-						   &irq_domain_simple_ops,
-						   &icu_data[4]);
-	icu_data[5].reg_status = mmp_icu_base + 0x15c;
-	icu_data[5].reg_mask = mmp_icu_base + 0x174;
-	icu_data[5].nr_irqs = 15;
-	icu_data[5].cascade_irq = 35;
-	icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs;
-	icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
-						   icu_data[5].virq_base, 0,
-						   &irq_domain_simple_ops,
-						   &icu_data[5]);
-	icu_data[6].reg_status = mmp_icu_base + 0x160;
-	icu_data[6].reg_mask = mmp_icu_base + 0x178;
-	icu_data[6].nr_irqs = 2;
-	icu_data[6].cascade_irq = 51;
-	icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs;
-	icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
-						   icu_data[6].virq_base, 0,
-						   &irq_domain_simple_ops,
-						   &icu_data[6]);
-	icu_data[7].reg_status = mmp_icu_base + 0x188;
-	icu_data[7].reg_mask = mmp_icu_base + 0x184;
-	icu_data[7].nr_irqs = 2;
-	icu_data[7].cascade_irq = 55;
-	icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs;
-	icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
-						   icu_data[7].virq_base, 0,
-						   &irq_domain_simple_ops,
-						   &icu_data[7]);
-	end = icu_data[7].virq_base + icu_data[7].nr_irqs;
-	for (irq = 0; irq < end; irq++) {
-		icu_mask_irq(irq_get_irq_data(irq));
-		if (irq == icu_data[1].cascade_irq ||
-		    irq == icu_data[2].cascade_irq ||
-		    irq == icu_data[3].cascade_irq ||
-		    irq == icu_data[4].cascade_irq ||
-		    irq == icu_data[5].cascade_irq ||
-		    irq == icu_data[6].cascade_irq ||
-		    irq == icu_data[7].cascade_irq) {
-			irq_set_chip(irq, &icu_irq_chip);
-			irq_set_chained_handler(irq, icu_mux_irq_demux);
-		} else {
-			irq_set_chip_and_handler(irq, &icu_irq_chip,
-						 handle_level_irq);
-		}
-	}
-	irq_set_default_host(icu_data[0].domain);
-	set_handle_irq(mmp2_handle_irq);
-}
-
-#ifdef CONFIG_OF
 static int __init mmp_init_bases(struct device_node *node)
 {
 	int ret, nr_irqs, irq, i = 0;
@@ -548,4 +422,3 @@ err:
 	return -EINVAL;
 }
 IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init);
-#endif

@@ -173,6 +173,16 @@ static struct irq_chip stm32_exti_h_chip_direct;
 #define EXTI_INVALID_IRQ	U8_MAX
 #define STM32MP1_DESC_IRQ_SIZE	(ARRAY_SIZE(stm32mp1_exti_banks) * IRQS_PER_BANK)
 
+/*
+ * Use some intentionally tricky logic here to initialize the whole array to
+ * EXTI_INVALID_IRQ, but then override certain fields, requiring us to indicate
+ * that we "know" that there are overrides in this structure, and we'll need to
+ * disable that warning from W=1 builds.
+ */
+__diag_push();
+__diag_ignore_all("-Woverride-init",
+		  "logic to initialize all and then override some is OK");
+
 static const u8 stm32mp1_desc_irq[] = {
 	/* default value */
 	[0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
@@ -208,6 +218,7 @@ static const u8 stm32mp1_desc_irq[] = {
 	[31] = 53,
 	[32] = 82,
 	[33] = 83,
+	[46] = 151,
 	[47] = 93,
 	[48] = 138,
 	[50] = 139,
@@ -266,6 +277,8 @@ static const u8 stm32mp13_desc_irq[] = {
 	[70] = 98,
 };
 
+__diag_pop();
+
 static const struct stm32_exti_drv_data stm32mp1_drv_data = {
 	.exti_banks = stm32mp1_exti_banks,
 	.bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),

@@ -26,13 +26,14 @@ int iort_register_domain_token(int trans_id, phys_addr_t base,
 			       struct fwnode_handle *fw_node);
 void iort_deregister_domain_token(int trans_id);
 struct fwnode_handle *iort_find_domain_token(int trans_id);
+int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+
 #ifdef CONFIG_ACPI_IORT
 void acpi_iort_init(void);
 u32 iort_msi_map_id(struct device *dev, u32 id);
 struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
 					  enum irq_domain_bus_token bus_token);
 void acpi_configure_pmsi_domain(struct device *dev);
-int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
 void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
 		       struct list_head *head);
 void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,

@@ -223,32 +223,35 @@ struct irq_data {
  *				  irq_chip::irq_set_affinity() when deactivated.
  * IRQD_IRQ_ENABLED_ON_SUSPEND	- Interrupt is enabled on suspend by irq pm if
  *				  irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set.
+ * IRQD_RESEND_WHEN_IN_PROGRESS	- Interrupt may fire when already in progress in which
+ *				  case it must be resent at the next available opportunity.
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
-	IRQD_SETAFFINITY_PENDING	= (1 <<  8),
-	IRQD_ACTIVATED			= (1 <<  9),
-	IRQD_NO_BALANCING		= (1 << 10),
-	IRQD_PER_CPU			= (1 << 11),
-	IRQD_AFFINITY_SET		= (1 << 12),
-	IRQD_LEVEL			= (1 << 13),
-	IRQD_WAKEUP_STATE		= (1 << 14),
-	IRQD_MOVE_PCNTXT		= (1 << 15),
-	IRQD_IRQ_DISABLED		= (1 << 16),
-	IRQD_IRQ_MASKED			= (1 << 17),
-	IRQD_IRQ_INPROGRESS		= (1 << 18),
-	IRQD_WAKEUP_ARMED		= (1 << 19),
-	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
-	IRQD_AFFINITY_MANAGED		= (1 << 21),
-	IRQD_IRQ_STARTED		= (1 << 22),
-	IRQD_MANAGED_SHUTDOWN		= (1 << 23),
-	IRQD_SINGLE_TARGET		= (1 << 24),
-	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25),
-	IRQD_CAN_RESERVE		= (1 << 26),
-	IRQD_MSI_NOMASK_QUIRK		= (1 << 27),
-	IRQD_HANDLE_ENFORCE_IRQCTX	= (1 << 28),
-	IRQD_AFFINITY_ON_ACTIVATE	= (1 << 29),
-	IRQD_IRQ_ENABLED_ON_SUSPEND	= (1 << 30),
+	IRQD_SETAFFINITY_PENDING	= BIT(8),
+	IRQD_ACTIVATED			= BIT(9),
+	IRQD_NO_BALANCING		= BIT(10),
+	IRQD_PER_CPU			= BIT(11),
+	IRQD_AFFINITY_SET		= BIT(12),
+	IRQD_LEVEL			= BIT(13),
+	IRQD_WAKEUP_STATE		= BIT(14),
+	IRQD_MOVE_PCNTXT		= BIT(15),
+	IRQD_IRQ_DISABLED		= BIT(16),
+	IRQD_IRQ_MASKED			= BIT(17),
+	IRQD_IRQ_INPROGRESS		= BIT(18),
+	IRQD_WAKEUP_ARMED		= BIT(19),
+	IRQD_FORWARDED_TO_VCPU		= BIT(20),
+	IRQD_AFFINITY_MANAGED		= BIT(21),
+	IRQD_IRQ_STARTED		= BIT(22),
+	IRQD_MANAGED_SHUTDOWN		= BIT(23),
+	IRQD_SINGLE_TARGET		= BIT(24),
+	IRQD_DEFAULT_TRIGGER_SET	= BIT(25),
+	IRQD_CAN_RESERVE		= BIT(26),
+	IRQD_MSI_NOMASK_QUIRK		= BIT(27),
+	IRQD_HANDLE_ENFORCE_IRQCTX	= BIT(28),
+	IRQD_AFFINITY_ON_ACTIVATE	= BIT(29),
+	IRQD_IRQ_ENABLED_ON_SUSPEND	= BIT(30),
+	IRQD_RESEND_WHEN_IN_PROGRESS	= BIT(31),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -448,6 +451,16 @@ static inline bool irqd_affinity_on_activate(struct irq_data *d)
 	return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
 }
 
+static inline void irqd_set_resend_when_in_progress(struct irq_data *d)
+{
+	__irqd_to_state(d) |= IRQD_RESEND_WHEN_IN_PROGRESS;
+}
+
+static inline bool irqd_needs_resend_when_in_progress(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_RESEND_WHEN_IN_PROGRESS;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
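
Putting the irq.h additions above together with the GICv3-ITS hunks earlier: an irqchip whose EOI-driven flow can race with affinity changes opts each descriptor into the new behaviour once, at allocation time, and the fasteoi handler does the rest. A condensed, non-buildable sketch of the pattern, assembled from this series' own code (kernel context assumed):

/* At allocation time, as the ITS driver does above for LPIs: */
irqd_set_resend_when_in_progress(irq_get_irq_data(virq));

/*
 * Later, in handle_fasteoi_irq(), when the interrupt fires on the new
 * CPU while the old CPU is still inside the handler:
 */
if (!irq_may_run(desc)) {
	if (irqd_needs_resend_when_in_progress(&desc->irq_data))
		desc->istate |= IRQS_PENDING;	/* remember the lost edge */
	goto out;
}

/* ...and once the first handler completes: */
if (unlikely(desc->istate & IRQS_PENDING))
	check_irq_resend(desc, false);		/* replay the interrupt */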

@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IRQCHIP_MMP_H
-#define __IRQCHIP_MMP_H
-
-extern struct irq_chip icu_irq_chip;
-
-extern void icu_init_irq(void);
-extern void mmp2_init_icu(void);
-
-#endif	/* __IRQCHIP_MMP_H */

@@ -102,6 +102,9 @@ struct irq_desc {
 	int			parent_irq;
 	struct module		*owner;
 	const char		*name;
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+	struct hlist_node	resend_node;
+#endif
 } ____cacheline_internodealigned_in_smp;
 
 #ifdef CONFIG_SPARSE_IRQ

@@ -306,6 +306,7 @@ static void __irq_disable(struct irq_desc *desc, bool mask);
 void irq_shutdown(struct irq_desc *desc)
 {
 	if (irqd_is_started(&desc->irq_data)) {
+		clear_irq_resend(desc);
 		desc->depth = 1;
 		if (desc->irq_data.chip->irq_shutdown) {
 			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
@@ -692,8 +693,16 @@ void handle_fasteoi_irq(struct irq_desc *desc)
 
 	raw_spin_lock(&desc->lock);
 
-	if (!irq_may_run(desc))
+	/*
+	 * When an affinity change races with IRQ handling, the next interrupt
+	 * can arrive on the new CPU before the original CPU has completed
+	 * handling the previous one - it may need to be resent.
+	 */
+	if (!irq_may_run(desc)) {
+		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
+			desc->istate |= IRQS_PENDING;
 		goto out;
+	}
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
@@ -715,6 +724,12 @@ void handle_fasteoi_irq(struct irq_desc *desc)
 
 	cond_unmask_eoi_irq(desc, chip);
 
+	/*
+	 * When the race described above happens this will resend the interrupt.
+	 */
+	if (unlikely(desc->istate & IRQS_PENDING))
+		check_irq_resend(desc, false);
+
 	raw_spin_unlock(&desc->lock);
 	return;
 out:

@@ -133,6 +133,8 @@ static const struct irq_bit_descr irqdata_states[] = {
 	BIT_MASK_DESCR(IRQD_HANDLE_ENFORCE_IRQCTX),
 
 	BIT_MASK_DESCR(IRQD_IRQ_ENABLED_ON_SUSPEND),
+
+	BIT_MASK_DESCR(IRQD_RESEND_WHEN_IN_PROGRESS),
 };
 
 static const struct irq_bit_descr irqdesc_states[] = {
@ -12,9 +12,9 @@
|
|||
#include <linux/sched/clock.h>
|
||||
|
||||
#ifdef CONFIG_SPARSE_IRQ
|
||||
# define IRQ_BITMAP_BITS (NR_IRQS + 8196)
|
||||
# define MAX_SPARSE_IRQS INT_MAX
|
||||
#else
|
||||
# define IRQ_BITMAP_BITS NR_IRQS
|
||||
# define MAX_SPARSE_IRQS NR_IRQS
|
||||
#endif
|
||||
|
||||
#define istate core_internal_state__do_not_mess_with_it
|
||||
|
@ -47,9 +47,12 @@ enum {
|
|||
* detection
|
||||
* IRQS_POLL_INPROGRESS - polling in progress
|
||||
* IRQS_ONESHOT - irq is not unmasked in primary handler
|
||||
* IRQS_REPLAY - irq is replayed
|
||||
* IRQS_REPLAY - irq has been resent and will not be resent
|
||||
* again until the handler has run and cleared
|
||||
* this flag.
|
||||
* IRQS_WAITING - irq is waiting
|
||||
* IRQS_PENDING - irq is pending and replayed later
|
||||
* IRQS_PENDING - irq needs to be resent and should be resent
|
||||
* at the next available opportunity.
|
||||
* IRQS_SUSPENDED - irq is suspended
|
||||
* IRQS_NMI - irq line is used to deliver NMIs
|
||||
* IRQS_SYSFS - descriptor has been added to sysfs
|
||||
|
@ -113,6 +116,8 @@ irqreturn_t handle_irq_event(struct irq_desc *desc);
|
|||
|
||||
/* Resending of interrupts :*/
|
||||
int check_irq_resend(struct irq_desc *desc, bool inject);
|
||||
void clear_irq_resend(struct irq_desc *desc);
|
||||
void irq_resend_init(struct irq_desc *desc);
|
||||
bool irq_wait_for_poll(struct irq_desc *desc);
|
||||
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
|
||||
|
||||
|
|

@@ -12,8 +12,7 @@
 #include <linux/export.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
-#include <linux/radix-tree.h>
-#include <linux/bitmap.h>
+#include <linux/maple_tree.h>
 #include <linux/irqdomain.h>
 #include <linux/sysfs.h>
 
@@ -131,7 +130,40 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 static DEFINE_MUTEX(sparse_irq_lock);
-static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
+static struct maple_tree sparse_irqs = MTREE_INIT_EXT(sparse_irqs,
+					MT_FLAGS_ALLOC_RANGE |
+					MT_FLAGS_LOCK_EXTERN |
+					MT_FLAGS_USE_RCU,
+					sparse_irq_lock);
+
+static int irq_find_free_area(unsigned int from, unsigned int cnt)
+{
+	MA_STATE(mas, &sparse_irqs, 0, 0);
+
+	if (mas_empty_area(&mas, from, MAX_SPARSE_IRQS, cnt))
+		return -ENOSPC;
+	return mas.index;
+}
+
+static unsigned int irq_find_at_or_after(unsigned int offset)
+{
+	unsigned long index = offset;
+	struct irq_desc *desc = mt_find(&sparse_irqs, &index, nr_irqs);
+
+	return desc ? irq_desc_get_irq(desc) : nr_irqs;
+}
+
+static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
+{
+	MA_STATE(mas, &sparse_irqs, irq, irq);
+
+	WARN_ON(mas_store_gfp(&mas, desc, GFP_KERNEL) != 0);
+}
+
+static void delete_irq_desc(unsigned int irq)
+{
+	MA_STATE(mas, &sparse_irqs, irq, irq);
+
+	mas_erase(&mas);
+}
 
 #ifdef CONFIG_SPARSE_IRQ
 
@@ -344,26 +376,14 @@ static void irq_sysfs_del(struct irq_desc *desc) {}
 
 #endif /* CONFIG_SYSFS */
 
-static RADIX_TREE(irq_desc_tree, GFP_KERNEL);
-
-static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
-{
-	radix_tree_insert(&irq_desc_tree, irq, desc);
-}
-
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-	return radix_tree_lookup(&irq_desc_tree, irq);
+	return mtree_load(&sparse_irqs, irq);
 }
 #ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
 EXPORT_SYMBOL_GPL(irq_to_desc);
 #endif
 
-static void delete_irq_desc(unsigned int irq)
-{
-	radix_tree_delete(&irq_desc_tree, irq);
-}
-
 #ifdef CONFIG_SMP
 static void free_masks(struct irq_desc *desc)
 {
@@ -415,6 +435,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 	desc_set_defaults(irq, desc, node, affinity, owner);
 	irqd_set(&desc->irq_data, flags);
 	kobject_init(&desc->kobj, &irq_kobj_type);
+	irq_resend_init(desc);
 
 	return desc;
 
@@ -505,7 +526,6 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
 		irq_sysfs_add(start + i, desc);
 		irq_add_debugfs_entry(start + i, desc);
 	}
-	bitmap_set(allocated_irqs, start, cnt);
 	return start;
 
 err:
@@ -516,7 +536,7 @@ err:
 
 static int irq_expand_nr_irqs(unsigned int nr)
 {
-	if (nr > IRQ_BITMAP_BITS)
+	if (nr > MAX_SPARSE_IRQS)
 		return -ENOMEM;
 	nr_irqs = nr;
 	return 0;
@@ -534,18 +554,17 @@ int __init early_irq_init(void)
 	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
 	       NR_IRQS, nr_irqs, initcnt);
 
-	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
-		nr_irqs = IRQ_BITMAP_BITS;
+	if (WARN_ON(nr_irqs > MAX_SPARSE_IRQS))
+		nr_irqs = MAX_SPARSE_IRQS;
 
-	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
-		initcnt = IRQ_BITMAP_BITS;
+	if (WARN_ON(initcnt > MAX_SPARSE_IRQS))
+		initcnt = MAX_SPARSE_IRQS;
 
 	if (initcnt > nr_irqs)
 		nr_irqs = initcnt;
 
 	for (i = 0; i < initcnt; i++) {
 		desc = alloc_desc(i, node, 0, NULL, NULL);
-		set_bit(i, allocated_irqs);
 		irq_insert_desc(i, desc);
 	}
 	return arch_early_irq_init();
@@ -581,6 +600,7 @@ int __init early_irq_init(void)
 		mutex_init(&desc[i].request_mutex);
 		init_waitqueue_head(&desc[i].wait_for_threads);
 		desc_set_defaults(i, &desc[i], node, NULL, NULL);
+		irq_resend_init(desc);
 	}
 	return arch_early_irq_init();
 }
@@ -599,6 +619,7 @@ static void free_desc(unsigned int irq)
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
+	delete_irq_desc(irq);
 }
 
 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
@@ -611,8 +632,8 @@ static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
 		struct irq_desc *desc = irq_to_desc(start + i);
 
 		desc->owner = owner;
+		irq_insert_desc(start + i, desc);
 	}
-	bitmap_set(allocated_irqs, start, cnt);
 	return start;
 }
 
@@ -624,7 +645,7 @@ static int irq_expand_nr_irqs(unsigned int nr)
 void irq_mark_irq(unsigned int irq)
 {
 	mutex_lock(&sparse_irq_lock);
-	bitmap_set(allocated_irqs, irq, 1);
+	irq_insert_desc(irq, irq_desc + irq);
 	mutex_unlock(&sparse_irq_lock);
 }
 
@@ -768,7 +789,6 @@ void irq_free_descs(unsigned int from, unsigned int cnt)
 	for (i = 0; i < cnt; i++)
 		free_desc(from + i);
 
-	bitmap_clear(allocated_irqs, from, cnt);
 	mutex_unlock(&sparse_irq_lock);
 }
 EXPORT_SYMBOL_GPL(irq_free_descs);
@@ -810,8 +830,7 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
 
 	mutex_lock(&sparse_irq_lock);
 
-	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
-					   from, cnt, 0);
+	start = irq_find_free_area(from, cnt);
 	ret = -EEXIST;
 	if (irq >=0 && start != irq)
 		goto unlock;
@@ -836,7 +855,7 @@ EXPORT_SYMBOL_GPL(__irq_alloc_descs);
  */
 unsigned int irq_get_next_irq(unsigned int offset)
 {
-	return find_next_bit(allocated_irqs, nr_irqs, offset);
+	return irq_find_at_or_after(offset);
 }
 
 struct irq_desc *

@@ -1915,6 +1915,8 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain)
 #endif	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
 
 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+#include "internals.h"
+
 static struct dentry *domain_dir;
 
 static void

@@ -21,8 +21,9 @@
 
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 
-/* Bitmap to handle software resend of interrupts: */
-static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
+/* hlist_head to handle software resend of interrupts: */
+static HLIST_HEAD(irq_resend_list);
+static DEFINE_RAW_SPINLOCK(irq_resend_lock);
 
 /*
  * Run software resends of IRQ's
@@ -30,18 +31,17 @@ static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
 static void resend_irqs(struct tasklet_struct *unused)
 {
 	struct irq_desc *desc;
-	int irq;
-
-	while (!bitmap_empty(irqs_resend, nr_irqs)) {
-		irq = find_first_bit(irqs_resend, nr_irqs);
-		clear_bit(irq, irqs_resend);
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
-		local_irq_disable();
+
+	raw_spin_lock_irq(&irq_resend_lock);
+	while (!hlist_empty(&irq_resend_list)) {
+		desc = hlist_entry(irq_resend_list.first, struct irq_desc,
+				   resend_node);
+		hlist_del_init(&desc->resend_node);
+		raw_spin_unlock(&irq_resend_lock);
 		desc->handle_irq(desc);
-		local_irq_enable();
+		raw_spin_lock(&irq_resend_lock);
 	}
+	raw_spin_unlock_irq(&irq_resend_lock);
 }
 
 /* Tasklet to handle resend: */
@@ -49,8 +49,6 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs);
 
 static int irq_sw_resend(struct irq_desc *desc)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
-
 	/*
 	 * Validate whether this interrupt can be safely injected from
 	 * non interrupt context
@@ -70,16 +68,31 @@ static int irq_sw_resend(struct irq_desc *desc)
 		 */
 		if (!desc->parent_irq)
 			return -EINVAL;
-		irq = desc->parent_irq;
+		desc = irq_to_desc(desc->parent_irq);
 	}
 
-	/* Set it pending and activate the softirq: */
-	set_bit(irq, irqs_resend);
+	/* Add to resend_list and activate the softirq: */
+	raw_spin_lock(&irq_resend_lock);
+	hlist_add_head(&desc->resend_node, &irq_resend_list);
+	raw_spin_unlock(&irq_resend_lock);
+
 	tasklet_schedule(&resend_tasklet);
 	return 0;
 }
 
+void clear_irq_resend(struct irq_desc *desc)
+{
+	raw_spin_lock(&irq_resend_lock);
+	hlist_del_init(&desc->resend_node);
+	raw_spin_unlock(&irq_resend_lock);
+}
+
+void irq_resend_init(struct irq_desc *desc)
+{
+	INIT_HLIST_NODE(&desc->resend_node);
+}
+
 #else
 void clear_irq_resend(struct irq_desc *desc) {}
 void irq_resend_init(struct irq_desc *desc) {}
 
 static int irq_sw_resend(struct irq_desc *desc)
 {
 	return -EINVAL;
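
The conversion above replaces a global bitmap sized by IRQ_BITMAP_BITS (scanned with find_first_bit() on every pass) with an hlist threaded through the descriptors themselves, so queueing and dequeueing become O(1) and nothing is sized by NR_IRQS anymore. A toy standalone model of that queue discipline, using a plain C list instead of the kernel's hlist (all names here are invented for the demo):

#include <stdio.h>

/* Toy stand-ins for struct irq_desc and its resend_node member. */
struct desc {
	int irq;
	struct desc *next;	/* models the embedded hlist_node resend_node */
};

static struct desc *resend_list;	/* models HLIST_HEAD(irq_resend_list) */

static void sw_resend(struct desc *d)	/* models irq_sw_resend(): O(1) push */
{
	d->next = resend_list;
	resend_list = d;
}

static void resend_irqs(void)		/* models the tasklet: pop until empty */
{
	while (resend_list) {
		struct desc *d = resend_list;

		resend_list = d->next;
		printf("replaying irq %d\n", d->irq);
	}
}

int main(void)
{
	struct desc a = { .irq = 42 }, b = { .irq = 97 };

	sw_resend(&a);
	sw_resend(&b);
	resend_irqs();
	return 0;
}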

@@ -80,21 +80,6 @@ static void wakeup_softirqd(void)
 	wake_up_process(tsk);
 }
 
-/*
- * If ksoftirqd is scheduled, we do not want to process pending softirqs
- * right now. Let ksoftirqd handle this at its own rate, to get fairness,
- * unless we're doing some of the synchronous softirqs.
- */
-#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
-static bool ksoftirqd_running(unsigned long pending)
-{
-	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
-
-	if (pending & SOFTIRQ_NOW_MASK)
-		return false;
-	return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
-}
-
 #ifdef CONFIG_TRACE_IRQFLAGS
 DEFINE_PER_CPU(int, hardirqs_enabled);
 DEFINE_PER_CPU(int, hardirq_context);
@@ -236,7 +221,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 		goto out;
 
 	pending = local_softirq_pending();
-	if (!pending || ksoftirqd_running(pending))
+	if (!pending)
 		goto out;
 
 	/*
@@ -432,9 +417,6 @@ static inline bool should_wake_ksoftirqd(void)
 
 static inline void invoke_softirq(void)
 {
-	if (ksoftirqd_running(local_softirq_pending()))
-		return;
-
 	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
@@ -468,7 +450,7 @@ asmlinkage __visible void do_softirq(void)
 
 	pending = local_softirq_pending();
 
-	if (pending && !ksoftirqd_running(pending))
+	if (pending)
 		do_softirq_own_stack();
 
 	local_irq_restore(flags);