powerpc/xive: Untangle xive from child interrupt controller drivers

XIVE-specific data is stored in handler_data. This creates a mess: XIVE has
to rely on child interrupt controller drivers to clean up this data, as was
done by commit 9a014f4568 ("powerpc/pseries/pci: Add a msi_free() handler to
clear XIVE data").

Instead, store the XIVE-specific data in chip_data and untangle the child
drivers.

Signed-off-by: Nam Cao <namcao@linutronix.de>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/83968073022a4cc211dcbd0faccd20ec05e58c3e.1754903590.git.namcao@linutronix.de
Author: Nam Cao, 2025-08-11 11:28:54 +02:00; committed by Madhavan Srinivasan
parent b034baff11
commit cc0cc23bab
4 changed files with 33 additions and 70 deletions
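
The core of the patch is an ownership switch in the xive core: the per-interrupt struct xive_irq_data now lives in chip_data rather than handler_data, so the xive driver allocates, looks up and frees it itself, and the powernv/pseries MSI drivers no longer need a ->msi_free() hook. A minimal sketch of that pattern follows (illustrative only, with hypothetical example_* names; the actual hunks are below):

/*
 * Sketch of the chip_data ownership pattern, using hypothetical
 * example_* names; not the actual patch.
 */
#include <linux/irq.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct example_irq_data {
        unsigned long hw_irq;
};

/* map/alloc path: allocate the per-IRQ data and hang it off chip_data */
static int example_irq_domain_map(unsigned int virq, unsigned long hw)
{
        struct example_irq_data *xd = kzalloc(sizeof(*xd), GFP_KERNEL);

        if (!xd)
                return -ENOMEM;
        xd->hw_irq = hw;
        irq_set_chip_data(virq, xd);
        return 0;
}

/* any irq_chip callback can fetch the data back from chip_data */
static void example_irq_eoi(struct irq_data *d)
{
        struct example_irq_data *xd = irq_data_get_irq_chip_data(d);

        pr_debug("eoi hw_irq=%lu\n", xd->hw_irq);
}

/* teardown stays inside the owning driver; children need no ->msi_free() */
static void example_irq_free_data(unsigned int virq)
{
        struct example_irq_data *xd = irq_get_chip_data(virq);

        if (!xd)
                return;
        irq_set_chip_data(virq, NULL);
        kfree(xd);
}

Because chip_data belongs to the irq_chip that owns the interrupt, the parent and child domains no longer have to coordinate over who frees the data.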

arch/powerpc/include/asm/xive.h

@@ -111,7 +111,6 @@ void xive_native_free_vp_block(u32 vp_base);
 int xive_native_populate_irq_data(u32 hw_irq,
                                  struct xive_irq_data *data);
 void xive_cleanup_irq_data(struct xive_irq_data *xd);
-void xive_irq_free_data(unsigned int virq);
 void xive_native_free_irq(u32 irq);
 int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);

arch/powerpc/platforms/powernv/pci-ioda.c

@@ -37,7 +37,6 @@
 #include <asm/firmware.h>
 #include <asm/pnv-pci.h>
 #include <asm/mmzone.h>
-#include <asm/xive.h>
 
 #include "powernv.h"
 #include "pci.h"
@@ -1707,23 +1706,6 @@ static int __pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
         return 0;
 }
 
-/*
- * The msi_free() op is called before irq_domain_free_irqs_top() when
- * the handler data is still available. Use that to clear the XIVE
- * controller.
- */
-static void pnv_msi_ops_msi_free(struct irq_domain *domain,
-                                 struct msi_domain_info *info,
-                                 unsigned int irq)
-{
-        if (xive_enabled())
-                xive_irq_free_data(irq);
-}
-
-static struct msi_domain_ops pnv_pci_msi_domain_ops = {
-        .msi_free = pnv_msi_ops_msi_free,
-};
-
 static void pnv_msi_shutdown(struct irq_data *d)
 {
         d = d->parent_data;
@@ -1754,7 +1736,6 @@ static struct irq_chip pnv_pci_msi_irq_chip = {
 static struct msi_domain_info pnv_msi_domain_info = {
         .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
-        .ops = &pnv_pci_msi_domain_ops,
         .chip = &pnv_pci_msi_irq_chip,
 };
@@ -1870,7 +1851,7 @@ static void pnv_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                  virq, d->hwirq, nr_irqs);
 
         msi_bitmap_free_hwirqs(&phb->msi_bmp, d->hwirq, nr_irqs);
-        /* XIVE domain is cleared through ->msi_free() */
+        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
 }
 
 static const struct irq_domain_ops pnv_irq_domain_ops = {

arch/powerpc/platforms/pseries/msi.c

@@ -15,7 +15,6 @@
 #include <asm/hw_irq.h>
 #include <asm/ppc-pci.h>
 #include <asm/machdep.h>
-#include <asm/xive.h>
 
 #include "pseries.h"
@@ -436,19 +435,6 @@ static int pseries_msi_ops_prepare(struct irq_domain *domain, struct device *dev
         return rtas_prepare_msi_irqs(pdev, nvec, type, arg);
 }
 
-/*
- * ->msi_free() is called before irq_domain_free_irqs_top() when the
- * handler data is still available. Use that to clear the XIVE
- * controller data.
- */
-static void pseries_msi_ops_msi_free(struct irq_domain *domain,
-                                     struct msi_domain_info *info,
-                                     unsigned int irq)
-{
-        if (xive_enabled())
-                xive_irq_free_data(irq);
-}
-
 /*
  * RTAS can not disable one MSI at a time. It's all or nothing. Do it
  * at the end after all IRQs have been freed.
@@ -463,7 +449,6 @@ static void pseries_msi_post_free(struct irq_domain *domain, struct device *dev)
 static struct msi_domain_ops pseries_pci_msi_domain_ops = {
         .msi_prepare = pseries_msi_ops_prepare,
-        .msi_free = pseries_msi_ops_msi_free,
         .msi_post_free = pseries_msi_post_free,
 };
@@ -604,8 +589,7 @@ static void pseries_irq_domain_free(struct irq_domain *domain, unsigned int virq
         struct pci_controller *phb = irq_data_get_irq_chip_data(d);
 
         pr_debug("%s bridge %pOF %d #%d\n", __func__, phb->dn, virq, nr_irqs);
-
-        /* XIVE domain data is cleared through ->msi_free() */
+        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
 }
 
 static const struct irq_domain_ops pseries_irq_domain_ops = {

arch/powerpc/sysdev/xive/common.c

@@ -317,7 +317,7 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
         if (d) {
                 char buffer[128];
 
-                xive_irq_data_dump(irq_data_get_irq_handler_data(d),
+                xive_irq_data_dump(irq_data_get_irq_chip_data(d),
                                    buffer, sizeof(buffer));
                 xmon_printf("%s", buffer);
         }
@@ -437,7 +437,7 @@ static void xive_do_source_eoi(struct xive_irq_data *xd)
 /* irq_chip eoi callback, called with irq descriptor lock held */
 static void xive_irq_eoi(struct irq_data *d)
 {
-        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+        struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
         struct xive_cpu *xc = __this_cpu_read(xive_cpu);
 
         DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
@@ -595,7 +595,7 @@ static int xive_pick_irq_target(struct irq_data *d,
                                 const struct cpumask *affinity)
 {
         static unsigned int fuzz;
-        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+        struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
         cpumask_var_t mask;
         int cpu = -1;
@@ -628,7 +628,7 @@ static int xive_pick_irq_target(struct irq_data *d,
 static unsigned int xive_irq_startup(struct irq_data *d)
 {
-        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+        struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
         unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
         int target, rc;
@@ -673,7 +673,7 @@ static unsigned int xive_irq_startup(struct irq_data *d)
 /* called with irq descriptor lock held */
 static void xive_irq_shutdown(struct irq_data *d)
 {
-        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+        struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
         unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
 
         pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d);
@@ -698,7 +698,7 @@ static void xive_irq_shutdown(struct irq_data *d)
 static void xive_irq_unmask(struct irq_data *d)
 {
-        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+        struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
 
         pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd);
@@ -707,7 +707,7 @@ static void xive_irq_unmask(struct irq_data *d)
 static void xive_irq_mask(struct irq_data *d)
 {
-        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+        struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
 
         pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd);
@@ -718,7 +718,7 @@ static int xive_irq_set_affinity(struct irq_data *d,
                                  const struct cpumask *cpumask,
                                  bool force)
 {
-        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+        struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
         unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
         u32 target, old_target;
         int rc = 0;
@@ -776,7 +776,7 @@ static int xive_irq_set_affinity(struct irq_data *d,
 static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+        struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
 
         /*
          * We only support these. This has really no effect other than setting
@@ -815,7 +815,7 @@ static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
 static int xive_irq_retrigger(struct irq_data *d)
 {
-        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+        struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
 
         /* This should be only for MSIs */
         if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
@@ -837,7 +837,7 @@ static int xive_irq_retrigger(struct irq_data *d)
  */
 static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
 {
-        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+        struct xive_irq_data *xd = irq_data_get_irq_chip_data(d);
         unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
         int rc;
         u8 pq;
@@ -951,7 +951,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
 static int xive_get_irqchip_state(struct irq_data *data,
                                   enum irqchip_irq_state which, bool *state)
 {
-        struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
+        struct xive_irq_data *xd = irq_data_get_irq_chip_data(data);
         u8 pq;
 
         switch (which) {
@@ -1011,21 +1011,20 @@ void xive_cleanup_irq_data(struct xive_irq_data *xd)
 }
 EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
 
-static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
+static struct xive_irq_data *xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
 {
         struct xive_irq_data *xd;
         int rc;
 
         xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
         if (!xd)
-                return -ENOMEM;
+                return ERR_PTR(-ENOMEM);
 
         rc = xive_ops->populate_irq_data(hw, xd);
         if (rc) {
                 kfree(xd);
-                return rc;
+                return ERR_PTR(rc);
         }
 
         xd->target = XIVE_INVALID_TARGET;
-        irq_set_handler_data(virq, xd);
 
         /*
          * Turn OFF by default the interrupt being mapped. A side
@@ -1036,20 +1035,19 @@ static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
          */
         xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
 
-        return 0;
+        return xd;
 }
 
-void xive_irq_free_data(unsigned int virq)
+static void xive_irq_free_data(unsigned int virq)
 {
-        struct xive_irq_data *xd = irq_get_handler_data(virq);
+        struct xive_irq_data *xd = irq_get_chip_data(virq);
 
         if (!xd)
                 return;
-        irq_set_handler_data(virq, NULL);
+        irq_set_chip_data(virq, NULL);
         xive_cleanup_irq_data(xd);
         kfree(xd);
 }
-EXPORT_SYMBOL_GPL(xive_irq_free_data);
 
 #ifdef CONFIG_SMP
@@ -1286,7 +1284,7 @@ void __init xive_smp_probe(void)
 static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
                                irq_hw_number_t hw)
 {
-        int rc;
+        struct xive_irq_data *xd;
 
         /*
          * Mark interrupts as edge sensitive by default so that resend
@@ -1294,11 +1292,12 @@ static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
          */
         irq_clear_status_flags(virq, IRQ_LEVEL);
 
-        rc = xive_irq_alloc_data(virq, hw);
-        if (rc)
-                return rc;
+        xd = xive_irq_alloc_data(virq, hw);
+        if (IS_ERR(xd))
+                return PTR_ERR(xd);
 
         irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);
+        irq_set_chip_data(virq, xd);
 
         return 0;
 }
@@ -1366,7 +1365,7 @@ static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d,
         seq_printf(m, "%*sXIVE:\n", ind, "");
         ind++;
 
-        xd = irq_data_get_irq_handler_data(irqd);
+        xd = irq_data_get_irq_chip_data(irqd);
         if (!xd) {
                 seq_printf(m, "%*snot assigned\n", ind, "");
                 return;
@@ -1403,6 +1402,7 @@ static int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                  unsigned int nr_irqs, void *arg)
 {
         struct irq_fwspec *fwspec = arg;
+        struct xive_irq_data *xd;
         irq_hw_number_t hwirq;
         unsigned int type = IRQ_TYPE_NONE;
         int i, rc;
@@ -1423,12 +1423,11 @@ static int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                 irq_clear_status_flags(virq, IRQ_LEVEL);
 
                 /* allocates and sets handler data */
-                rc = xive_irq_alloc_data(virq + i, hwirq + i);
-                if (rc)
-                        return rc;
+                xd = xive_irq_alloc_data(virq + i, hwirq + i);
+                if (IS_ERR(xd))
+                        return PTR_ERR(xd);
 
-                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
-                                              &xive_irq_chip, domain->host_data);
+                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &xive_irq_chip, xd);
                 irq_set_handler(virq + i, handle_fasteoi_irq);
         }
@@ -1764,7 +1763,7 @@ static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d)
         seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
                    hw_irq, target, prio, lirq);
 
-        xive_irq_data_dump(irq_data_get_irq_handler_data(d), buffer, sizeof(buffer));
+        xive_irq_data_dump(irq_data_get_irq_chip_data(d), buffer, sizeof(buffer));
         seq_puts(m, buffer);
         seq_puts(m, "\n");
 }
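
A detail worth noting in the reworked common code above: xive_irq_alloc_data() now returns the xive_irq_data pointer itself and reports failure through the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention rather than a separate int return code, which is what lets xive_irq_domain_map() and xive_irq_domain_alloc() pass the result straight into chip_data. A generic illustration of that convention (hypothetical foo_* names, not xive code):

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
        int value;
};

/* return the object on success, or an errno encoded in the pointer */
static struct foo *foo_alloc(void)
{
        struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return ERR_PTR(-ENOMEM);
        return p;
}

static int foo_use(void)
{
        struct foo *p = foo_alloc();

        if (IS_ERR(p))                  /* detect an encoded error... */
                return PTR_ERR(p);      /* ...and recover the errno */

        p->value = 1;
        kfree(p);
        return 0;
}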