author    Mark Brown <broonie@kernel.org>  2025-10-21 19:05:19 +0100
committer Mark Brown <broonie@kernel.org>  2025-10-21 19:05:19 +0100
commit    2880c42a0de68e9eb979e5331fbd5b2c79e093ad (patch)
tree      5d1637310889839165d9ae9594868c76a6c53808 /net/core/dev.c
parent    ecba655bf54a661ffe078856cd8dbc898270e4b5 (diff)
parent    6658472a3e2de08197acfe099ba71ee0e2505ecf (diff)
ASoC: amd: ps: Propagate the PCI subsystem Vendor and Device IDs

Merge series from Simon Trimmer <simont@opensource.cirrus.com>:

This series of two patches propagates the PCI subsystem Vendor and Device
IDs so that they can be used by component drivers to differentiate
firmware loads.
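For context, a rough sketch of the mechanism the series describes: struct
pci_dev already exposes the subsystem IDs as pci->subsystem_vendor and
pci->subsystem_device, so the parent PCI driver only has to copy them into
whatever platform data it hands to its component drivers. The snippet below
is illustrative only, not the actual amd/ps code; acp_pdata, acp_fill_pdata()
and acp_fw_name() are made-up names.

	/* Hedged sketch: pdata layout and firmware naming are assumptions,
	 * not the amd/ps implementation.
	 */
	#include <linux/pci.h>

	struct acp_pdata {			/* hypothetical platform data */
		u16 subsystem_vendor;
		u16 subsystem_device;
	};

	static void acp_fill_pdata(struct pci_dev *pci, struct acp_pdata *pdata)
	{
		/* struct pci_dev carries the subsystem IDs directly */
		pdata->subsystem_vendor = pci->subsystem_vendor;
		pdata->subsystem_device = pci->subsystem_device;
	}

	/* A component driver could then pick a board-specific firmware: */
	static int acp_fw_name(const struct acp_pdata *pdata, char *buf, size_t len)
	{
		return snprintf(buf, len, "acp-fw-%04x-%04x.bin",
				pdata->subsystem_vendor, pdata->subsystem_device);
	}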
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c | 40 +++++++++++++++++++++++++++++++++++-----
1 file changed, 35 insertions(+), 5 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index a64cef2c537e..2acfa44927da 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -12176,6 +12176,35 @@ static void dev_memory_provider_uninstall(struct net_device *dev)
 	}
 }
 
+/* devices must be UP and netdev_lock()'d */
+static void netif_close_many_and_unlock(struct list_head *close_head)
+{
+	struct net_device *dev, *tmp;
+
+	netif_close_many(close_head, false);
+
+	/* ... now unlock them */
+	list_for_each_entry_safe(dev, tmp, close_head, close_list) {
+		netdev_unlock(dev);
+		list_del_init(&dev->close_list);
+	}
+}
+
+static void netif_close_many_and_unlock_cond(struct list_head *close_head)
+{
+#ifdef CONFIG_LOCKDEP
+	/* We can only track up to MAX_LOCK_DEPTH locks per task.
+	 *
+	 * Reserve half the available slots for additional locks possibly
+	 * taken by notifiers and (soft)irqs.
+	 */
+	unsigned int limit = MAX_LOCK_DEPTH / 2;
+
+	if (lockdep_depth(current) > limit)
+		netif_close_many_and_unlock(close_head);
+#endif
+}
+
 void unregister_netdevice_many_notify(struct list_head *head,
 				      u32 portid, const struct nlmsghdr *nlh)
 {
@@ -12208,17 +12237,18 @@ void unregister_netdevice_many_notify(struct list_head *head,
 
 	/* If device is running, close it first. Start with ops locked... */
 	list_for_each_entry(dev, head, unreg_list) {
+		if (!(dev->flags & IFF_UP))
+			continue;
 		if (netdev_need_ops_lock(dev)) {
 			list_add_tail(&dev->close_list, &close_head);
 			netdev_lock(dev);
 		}
+		netif_close_many_and_unlock_cond(&close_head);
 	}
-	netif_close_many(&close_head, true);
-	/* ... now unlock them and go over the rest. */
+	netif_close_many_and_unlock(&close_head);
+	/* ... now go over the rest. */
 	list_for_each_entry(dev, head, unreg_list) {
-		if (netdev_need_ops_lock(dev))
-			netdev_unlock(dev);
-		else
+		if (!netdev_need_ops_lock(dev))
 			list_add_tail(&dev->close_list, &close_head);
 	}
 	netif_close_many(&close_head, true);
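The net/core/dev.c change itself is about lockdep bookkeeping: closing every
running device in a single batch means holding one netdev lock per device at
the same time, and lockdep can only track MAX_LOCK_DEPTH held locks per task.
The new netif_close_many_and_unlock_cond() flushes the batch early once half
that budget is used. Below is a standalone userspace sketch of the same
pattern, with illustrative names (device, batch, LOCK_BUDGET) and a manual
counter standing in for lockdep_depth(current):

	#include <stdio.h>
	#include <pthread.h>

	#define MAX_LOCK_DEPTH	48			/* stand-in for lockdep's per-task limit */
	#define LOCK_BUDGET	(MAX_LOCK_DEPTH / 2)	/* reserve half for nested locks */

	struct device {
		int id;
		int up;
		pthread_mutex_t lock;
	};

	static struct device devices[100];
	static struct device *batch[100];
	static int batch_len;	/* also the number of locks currently held */

	/* Close and unlock everything accumulated so far (the "flush"). */
	static void close_batch_and_unlock(void)
	{
		for (int i = 0; i < batch_len; i++) {
			printf("closing dev %d\n", batch[i]->id);
			pthread_mutex_unlock(&batch[i]->lock);
		}
		batch_len = 0;
	}

	int main(void)
	{
		for (int i = 0; i < 100; i++) {
			devices[i].id = i;
			devices[i].up = 1;
			pthread_mutex_init(&devices[i].lock, NULL);
		}

		for (int i = 0; i < 100; i++) {
			if (!devices[i].up)
				continue;
			pthread_mutex_lock(&devices[i].lock);
			batch[batch_len++] = &devices[i];

			/* Key idea: flush before holding too many locks at once. */
			if (batch_len > LOCK_BUDGET)
				close_batch_and_unlock();
		}
		close_batch_and_unlock();	/* flush the remainder */
		return 0;
	}

The kernel only needs the early flush under CONFIG_LOCKDEP (hence the #ifdef
in the hunk above): without lockdep there is no tracking limit, so deferring
all closes to the final netif_close_many() call remains fine.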