author		Marc Zyngier <maz@kernel.org>	2025-10-20 13:29:32 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2025-10-27 17:16:35 +0100
commit		b9c6aa9efc71dae656f9f913d1250ea08cd6e10f (patch)
tree		43e1a0fafa7c64c9d82e4bcaee9406c1ec313991 /kernel/irq
parent		258e7d28a3dcd389239f9688058140c1a418b549 (diff)
genirq: Update request_percpu_nmi() to take an affinity
Continue spreading the notion of affinity to the per CPU interrupt request
code by updating the call sites that use request_percpu_nmi() (all two of
them) to take an affinity pointer. This pointer is firmly NULL for now.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20251020122944.3074811-16-maz@kernel.org
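
For illustration only (not part of this patch): a minimal sketch of how a caller is adapted to the new prototype, passing a NULL affinity so the behaviour stays exactly as before. The my_pmu_* names and the per-CPU cookie below are hypothetical placeholders, not code from the series.

/* Sketch only; the my_pmu_* identifiers are hypothetical. */
#include <linux/interrupt.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, my_pmu_cpu_data);

static irqreturn_t my_pmu_nmi_handler(int irq, void *dev_id)
{
	/* Runs in NMI context on the local CPU. */
	return IRQ_HANDLED;
}

static int my_pmu_setup_nmi(unsigned int irq)
{
	/* NULL affinity means "all possible CPUs", matching the pre-patch behaviour. */
	return request_percpu_nmi(irq, my_pmu_nmi_handler, "my_pmu",
				  NULL, &my_pmu_cpu_data);
}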
Diffstat (limited to 'kernel/irq')
-rw-r--r--	kernel/irq/manage.c	12
1 files changed, 7 insertions, 5 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 5f4c65167743..b1a3140e5f3c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -2527,6 +2527,7 @@ EXPORT_SYMBOL_GPL(__request_percpu_irq);
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* @name: An ascii name for the claiming device
+ * @affinity: A cpumask describing the target CPUs for this interrupt
* @dev_id: A percpu cookie passed back to the handler function
*
* This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
@@ -2543,8 +2544,8 @@ EXPORT_SYMBOL_GPL(__request_percpu_irq);
* If the interrupt line cannot be used to deliver NMIs, function
* will fail returning a negative value.
*/
-int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
- const char *name, void __percpu *dev_id)
+int request_percpu_nmi(unsigned int irq, irq_handler_t handler, const char *name,
+ const struct cpumask *affinity, void __percpu *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
@@ -2561,12 +2562,13 @@ int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
!irq_supports_nmi(desc))
return -EINVAL;
- /* The line cannot already be NMI */
- if (irq_is_nmi(desc))
+ /* The line cannot be NMI already if the new request covers all CPUs */
+ if (irq_is_nmi(desc) &&
+ (!affinity || cpumask_equal(affinity, cpu_possible_mask)))
return -EINVAL;
action = create_percpu_irqaction(handler, IRQF_NO_THREAD | IRQF_NOBALANCING,
- name, NULL, dev_id);
+ name, affinity, dev_id);
if (!action)
return -ENOMEM;
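
A side note on the relaxed check above: a line that is already an NMI is now rejected only when the new request spans all possible CPUs, so this particular check no longer stands in the way of a later request that targets a CPU subset. A speculative sketch of such a caller, reusing the hypothetical handler from the earlier example (other setup steps such as enable_percpu_nmi() and teardown are omitted, and downstream checks may still apply):

/* Speculative sketch: request the same per-CPU NMI line for a subset of CPUs. */
static int my_pmu_setup_subset_nmi(unsigned int irq, const struct cpumask *subset,
				   void __percpu *cookie)
{
	/*
	 * With the updated check, this is no longer refused with -EINVAL
	 * merely because the line is already an NMI, as long as @subset
	 * does not cover cpu_possible_mask.
	 */
	return request_percpu_nmi(irq, my_pmu_nmi_handler, "my_pmu",
				  subset, cookie);
}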