|
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
| 2 | +/* |
| 3 | + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited |
| 4 | + */ |
| 5 | + |
| 6 | +#include <linux/init.h> |
| 7 | +#include <linux/kernel.h> |
| 8 | +#include <linux/interrupt.h> |
| 9 | +#include <linux/irq.h> |
| 10 | +#include <linux/irqchip.h> |
| 11 | +#include <linux/irqdomain.h> |
| 12 | + |
| 13 | +#include <asm/loongarch.h> |
| 14 | +#include <asm/setup.h> |
| 15 | + |
/* Linear domain covering the CPU-local interrupt lines (ECFG/ESTAT bits). */
static struct irq_domain *irq_domain;
/* fwnode identifying this controller; referenced by cascaded irqchips. */
struct fwnode_handle *cpuintc_handle;
| 18 | + |
| 19 | +static void mask_loongarch_irq(struct irq_data *d) |
| 20 | +{ |
| 21 | + clear_csr_ecfg(ECFGF(d->hwirq)); |
| 22 | +} |
| 23 | + |
| 24 | +static void unmask_loongarch_irq(struct irq_data *d) |
| 25 | +{ |
| 26 | + set_csr_ecfg(ECFGF(d->hwirq)); |
| 27 | +} |
| 28 | + |
/*
 * irq_chip for the per-CPU interrupt lines. Mask/unmask operate on the
 * current CPU's ECFG CSR; no ack/eoi is needed for these level-style lines.
 */
static struct irq_chip cpu_irq_controller = {
	.name		= "CPUINTC",
	.irq_mask	= mask_loongarch_irq,
	.irq_unmask	= unmask_loongarch_irq,
};
| 34 | + |
| 35 | +static void handle_cpu_irq(struct pt_regs *regs) |
| 36 | +{ |
| 37 | + int hwirq; |
| 38 | + unsigned int estat = read_csr_estat() & CSR_ESTAT_IS; |
| 39 | + |
| 40 | + while ((hwirq = ffs(estat))) { |
| 41 | + estat &= ~BIT(hwirq - 1); |
| 42 | + generic_handle_domain_irq(irq_domain, hwirq - 1); |
| 43 | + } |
| 44 | +} |
| 45 | + |
| 46 | +static int loongarch_cpu_intc_map(struct irq_domain *d, unsigned int irq, |
| 47 | + irq_hw_number_t hwirq) |
| 48 | +{ |
| 49 | + irq_set_noprobe(irq); |
| 50 | + irq_set_chip_and_handler(irq, &cpu_irq_controller, handle_percpu_irq); |
| 51 | + |
| 52 | + return 0; |
| 53 | +} |
| 54 | + |
/* Domain ops: one-cell DT/fwspec translation, per-line mapping above. */
static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = {
	.map = loongarch_cpu_intc_map,
	.xlate = irq_domain_xlate_onecell,
};
| 59 | + |
| 60 | +static int __init |
| 61 | +liointc_parse_madt(union acpi_subtable_headers *header, |
| 62 | + const unsigned long end) |
| 63 | +{ |
| 64 | + struct acpi_madt_lio_pic *liointc_entry = (struct acpi_madt_lio_pic *)header; |
| 65 | + |
| 66 | + return liointc_acpi_init(irq_domain, liointc_entry); |
| 67 | +} |
| 68 | + |
| 69 | +static int __init |
| 70 | +eiointc_parse_madt(union acpi_subtable_headers *header, |
| 71 | + const unsigned long end) |
| 72 | +{ |
| 73 | + struct acpi_madt_eio_pic *eiointc_entry = (struct acpi_madt_eio_pic *)header; |
| 74 | + |
| 75 | + return eiointc_acpi_init(irq_domain, eiointc_entry); |
| 76 | +} |
| 77 | + |
| 78 | +static int __init acpi_cascade_irqdomain_init(void) |
| 79 | +{ |
| 80 | + acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC, |
| 81 | + liointc_parse_madt, 0); |
| 82 | + acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC, |
| 83 | + eiointc_parse_madt, 0); |
| 84 | + return 0; |
| 85 | +} |
| 86 | + |
| 87 | +static int __init cpuintc_acpi_init(union acpi_subtable_headers *header, |
| 88 | + const unsigned long end) |
| 89 | +{ |
| 90 | + if (irq_domain) |
| 91 | + return 0; |
| 92 | + |
| 93 | + /* Mask interrupts. */ |
| 94 | + clear_csr_ecfg(ECFG0_IM); |
| 95 | + clear_csr_estat(ESTATF_IP); |
| 96 | + |
| 97 | + cpuintc_handle = irq_domain_alloc_fwnode(NULL); |
| 98 | + irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM, |
| 99 | + &loongarch_cpu_intc_irq_domain_ops, NULL); |
| 100 | + |
| 101 | + if (!irq_domain) |
| 102 | + panic("Failed to add irqdomain for LoongArch CPU"); |
| 103 | + |
| 104 | + set_handle_irq(&handle_cpu_irq); |
| 105 | + acpi_cascade_irqdomain_init(); |
| 106 | + |
| 107 | + return 0; |
| 108 | +} |
| 109 | + |
/* Register the probe: run cpuintc_acpi_init() for MADT CORE_PIC v1 entries. */
IRQCHIP_ACPI_DECLARE(cpuintc_v1, ACPI_MADT_TYPE_CORE_PIC,
		NULL, ACPI_MADT_CORE_PIC_VERSION_V1, cpuintc_acpi_init);
0 commit comments