Merge branch 'net-inline-rollback_registered-functions'
After recent changes to the error path of register_netdevice() we no
longer need a version of unregister_netdevice_many() which does not set
net_todo. We can inline the rollback_registered() functions into the
respective unregister_netdevice() calls.

Link: https://lore.kernel.org/r/20210119202521.3108236-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
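The batching this relies on is easiest to see from the caller's side. Below is a minimal sketch (the helper name and device array are invented for illustration) of tearing down several netdevs through one unregister_netdevice_many() call, so the synchronize_net() and flush_all_backlogs() passes in the inlined body further down run once per batch rather than once per device:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical helper: queue every device onto one list, then
 * unregister the whole batch under the RTNL lock.
 */
static void example_destroy_devs(struct net_device **devs, int n)
{
	LIST_HEAD(list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &list);
	/* net_set_todo() now happens inside unregister_netdevice_many() */
	unregister_netdevice_many(&list);
	rtnl_unlock();	/* netdev_run_todo() completes the unregister */
}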
commit d29aee6062
 net/core/dev.c | 210 +++++++++++++++++++++-----------------------
 1 file changed, 98 insertions(+), 112 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5709,7 +5709,7 @@ static void flush_all_backlogs(void)
 	}
 
 	/* we can have in flight packet[s] on the cpus we are not flushing,
-	 * synchronize_net() in rollback_registered_many() will take care of
+	 * synchronize_net() in unregister_netdevice_many() will take care of
	 * them
 	 */
 	for_each_cpu(cpu, &flush_cpus)
@@ -9459,106 +9459,6 @@ static void net_set_todo(struct net_device *dev)
 	dev_net(dev)->dev_unreg_count++;
 }
 
-static void rollback_registered_many(struct list_head *head)
-{
-	struct net_device *dev, *tmp;
-	LIST_HEAD(close_head);
-
-	BUG_ON(dev_boot_phase);
-	ASSERT_RTNL();
-
-	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
-		/* Some devices call without registering
-		 * for initialization unwind. Remove those
-		 * devices and proceed with the remaining.
-		 */
-		if (dev->reg_state == NETREG_UNINITIALIZED) {
-			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
-				 dev->name, dev);
-
-			WARN_ON(1);
-			list_del(&dev->unreg_list);
-			continue;
-		}
-		dev->dismantle = true;
-		BUG_ON(dev->reg_state != NETREG_REGISTERED);
-	}
-
-	/* If device is running, close it first. */
-	list_for_each_entry(dev, head, unreg_list)
-		list_add_tail(&dev->close_list, &close_head);
-	dev_close_many(&close_head, true);
-
-	list_for_each_entry(dev, head, unreg_list) {
-		/* And unlink it from device chain. */
-		unlist_netdevice(dev);
-
-		dev->reg_state = NETREG_UNREGISTERING;
-	}
-	flush_all_backlogs();
-
-	synchronize_net();
-
-	list_for_each_entry(dev, head, unreg_list) {
-		struct sk_buff *skb = NULL;
-
-		/* Shutdown queueing discipline. */
-		dev_shutdown(dev);
-
-		dev_xdp_uninstall(dev);
-
-		/* Notify protocols, that we are about to destroy
-		 * this device. They should clean all the things.
-		 */
-		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-
-		if (!dev->rtnl_link_ops ||
-		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
-			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
-						     GFP_KERNEL, NULL, 0);
-
-		/*
-		 * Flush the unicast and multicast chains
-		 */
-		dev_uc_flush(dev);
-		dev_mc_flush(dev);
-
-		netdev_name_node_alt_flush(dev);
-		netdev_name_node_free(dev->name_node);
-
-		if (dev->netdev_ops->ndo_uninit)
-			dev->netdev_ops->ndo_uninit(dev);
-
-		if (skb)
-			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
-
-		/* Notifier chain MUST detach us all upper devices. */
-		WARN_ON(netdev_has_any_upper_dev(dev));
-		WARN_ON(netdev_has_any_lower_dev(dev));
-
-		/* Remove entries from kobject tree */
-		netdev_unregister_kobject(dev);
-#ifdef CONFIG_XPS
-		/* Remove XPS queueing entries */
-		netif_reset_xps_queues_gt(dev, 0);
-#endif
-	}
-
-	synchronize_net();
-
-	list_for_each_entry(dev, head, unreg_list)
-		dev_put(dev);
-}
-
-static void rollback_registered(struct net_device *dev)
-{
-	LIST_HEAD(single);
-
-	list_add(&dev->unreg_list, &single);
-	rollback_registered_many(&single);
-	list_del(&single);
-}
-
 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
 	struct net_device *upper, netdev_features_t features)
 {
@@ -10108,8 +10008,7 @@ int register_netdevice(struct net_device *dev)
 	if (ret) {
 		/* Expect explicit free_netdev() on failure */
 		dev->needs_free_netdev = false;
-		rollback_registered(dev);
-		net_set_todo(dev);
+		unregister_netdevice_queue(dev, NULL);
 		goto out;
 	}
 	/*
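The error path above now funnels through unregister_netdevice_queue(dev, NULL); clearing needs_free_netdev just before keeps the contract that a failed register_netdevice() leaves freeing to the caller. A hedged sketch of that contract from a hypothetical driver's create path (the function name is invented):

#include <linux/etherdevice.h>
#include <linux/err.h>
#include <linux/rtnetlink.h>

static struct net_device *example_create(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(0);	/* no private area, for brevity */
	if (!dev)
		return ERR_PTR(-ENOMEM);

	rtnl_lock();
	err = register_netdevice(dev);	/* unwinds internally on error */
	rtnl_unlock();
	if (err) {
		free_netdev(dev);	/* caller still owns the memory */
		return ERR_PTR(err);
	}
	return dev;
}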
@@ -10731,9 +10630,10 @@ void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
 	if (head) {
 		list_move_tail(&dev->unreg_list, head);
 	} else {
-		rollback_registered(dev);
-		/* Finish processing unregister after unlock */
-		net_set_todo(dev);
+		LIST_HEAD(single);
+
+		list_add(&dev->unreg_list, &single);
+		unregister_netdevice_many(&single);
 	}
 }
 EXPORT_SYMBOL(unregister_netdevice_queue);
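For context, the unregister_netdevice() calls named in the commit message reach this code through the thin static-inline wrapper in include/linux/netdevice.h, so after this hunk a single-device unregister builds a one-entry list and reuses the batched path:

/* include/linux/netdevice.h (for reference) */
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}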
@@ -10747,14 +10647,100 @@ EXPORT_SYMBOL(unregister_netdevice_queue);
  */
 void unregister_netdevice_many(struct list_head *head)
 {
-	struct net_device *dev;
+	struct net_device *dev, *tmp;
+	LIST_HEAD(close_head);
 
-	if (!list_empty(head)) {
-		rollback_registered_many(head);
-		list_for_each_entry(dev, head, unreg_list)
-			net_set_todo(dev);
-		list_del(head);
+	BUG_ON(dev_boot_phase);
+	ASSERT_RTNL();
+
+	if (list_empty(head))
+		return;
+
+	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
+		/* Some devices call without registering
+		 * for initialization unwind. Remove those
+		 * devices and proceed with the remaining.
+		 */
+		if (dev->reg_state == NETREG_UNINITIALIZED) {
+			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
+				 dev->name, dev);
+
+			WARN_ON(1);
+			list_del(&dev->unreg_list);
+			continue;
+		}
+		dev->dismantle = true;
+		BUG_ON(dev->reg_state != NETREG_REGISTERED);
+	}
+
+	/* If device is running, close it first. */
+	list_for_each_entry(dev, head, unreg_list)
+		list_add_tail(&dev->close_list, &close_head);
+	dev_close_many(&close_head, true);
+
+	list_for_each_entry(dev, head, unreg_list) {
+		/* And unlink it from device chain. */
+		unlist_netdevice(dev);
+
+		dev->reg_state = NETREG_UNREGISTERING;
 	}
+	flush_all_backlogs();
+
+	synchronize_net();
+
+	list_for_each_entry(dev, head, unreg_list) {
+		struct sk_buff *skb = NULL;
+
+		/* Shutdown queueing discipline. */
+		dev_shutdown(dev);
+
+		dev_xdp_uninstall(dev);
+
+		/* Notify protocols, that we are about to destroy
+		 * this device. They should clean all the things.
+		 */
+		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+
+		if (!dev->rtnl_link_ops ||
+		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
+						     GFP_KERNEL, NULL, 0);
+
+		/*
+		 * Flush the unicast and multicast chains
+		 */
+		dev_uc_flush(dev);
+		dev_mc_flush(dev);
+
+		netdev_name_node_alt_flush(dev);
+		netdev_name_node_free(dev->name_node);
+
+		if (dev->netdev_ops->ndo_uninit)
+			dev->netdev_ops->ndo_uninit(dev);
+
+		if (skb)
+			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
+
+		/* Notifier chain MUST detach us all upper devices. */
+		WARN_ON(netdev_has_any_upper_dev(dev));
+		WARN_ON(netdev_has_any_lower_dev(dev));
+
+		/* Remove entries from kobject tree */
+		netdev_unregister_kobject(dev);
+#ifdef CONFIG_XPS
+		/* Remove XPS queueing entries */
+		netif_reset_xps_queues_gt(dev, 0);
+#endif
+	}
+
+	synchronize_net();
+
+	list_for_each_entry(dev, head, unreg_list) {
+		dev_put(dev);
+		net_set_todo(dev);
+	}
+
+	list_del(head);
 }
 EXPORT_SYMBOL(unregister_netdevice_many);
 