iavf: use mutexes for locking of critical sections

As a follow-up to the discussion with Jakub Kicinski about iavf locking being insufficient [1], convert iavf to use mutexes instead of bitops. The locking logic is kept as is, just a drop-in replacement of enum iavf_critical_section_t with separate mutexes. The only difference is that the mutexes will be destroyed before the module is unloaded.

[1] https://lwn.net/ml/netdev/20210316150210.00007249%40intel.com/

Signed-off-by: Stefan Assmann <sassmann@kpanic.de>
Tested-by: Marek Szlosek <marek.szlosek@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
commit 5ac49f3c27
parent 752be29764
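
The conversion follows one repeating pattern: every ad-hoc bit lock on adapter->crit_section becomes a struct mutex with the same acquire/release flow, with the mutexes initialized in iavf_probe() and destroyed in iavf_remove(). A minimal before/after sketch of that pattern follows; the helper names are hypothetical and only condense what the diff below does, they are not part of the driver.

/* Sketch only -- iavf_crit_section_old/new do not exist in the driver. */

/* Before: a flag bit in adapter->crit_section served as the lock. */
static void iavf_crit_section_old(struct iavf_adapter *adapter)
{
	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
		usleep_range(500, 1000);	/* wait until the bit is free */
	/* ... critical section ... */
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
}

/* After: a real mutex, same trylock-and-wait logic. */
static void iavf_crit_section_new(struct iavf_adapter *adapter)
{
	while (!mutex_trylock(&adapter->crit_lock))
		usleep_range(500, 1000);
	/* ... critical section ... */
	mutex_unlock(&adapter->crit_lock);
}

One practical gain from the switch is that a struct mutex has a defined owner and is visible to kernel debugging facilities such as lockdep, which the open-coded bit lock was not.
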
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -185,12 +185,6 @@ enum iavf_state_t {
 	__IAVF_RUNNING,		/* opened, working */
 };
 
-enum iavf_critical_section_t {
-	__IAVF_IN_CRITICAL_TASK,	/* cannot be interrupted */
-	__IAVF_IN_CLIENT_TASK,
-	__IAVF_IN_REMOVE_TASK,	/* device being removed */
-};
-
 #define IAVF_CLOUD_FIELD_OMAC		0x01
 #define IAVF_CLOUD_FIELD_IMAC		0x02
 #define IAVF_CLOUD_FIELD_IVLAN		0x04
@@ -235,6 +229,9 @@ struct iavf_adapter {
 	struct iavf_q_vector *q_vectors;
 	struct list_head vlan_filter_list;
 	struct list_head mac_filter_list;
+	struct mutex crit_lock;
+	struct mutex client_lock;
+	struct mutex remove_lock;
 	/* Lock to protect accesses to MAC and VLAN lists */
 	spinlock_t mac_vlan_list_lock;
 	char misc_vector_name[IFNAMSIZ + 9];
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1352,8 +1352,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
 	if (!fltr)
 		return -ENOMEM;
 
-	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
-				&adapter->crit_section)) {
+	while (!mutex_trylock(&adapter->crit_lock)) {
 		if (--count == 0) {
 			kfree(fltr);
 			return -EINVAL;
@@ -1378,7 +1377,7 @@ ret:
 	if (err && fltr)
 		kfree(fltr);
 
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->crit_lock);
 	return err;
 }
 
@@ -1563,8 +1562,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
 		return -EINVAL;
 	}
 
-	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
-				&adapter->crit_section)) {
+	while (!mutex_trylock(&adapter->crit_lock)) {
 		if (--count == 0) {
 			kfree(rss_new);
 			return -EINVAL;
@@ -1600,7 +1598,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
 	if (!err)
 		mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
 
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->crit_lock);
 
 	if (!rss_new_add)
 		kfree(rss_new);
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -132,21 +132,18 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
 }
 
 /**
- * iavf_lock_timeout - try to set bit but give up after timeout
- * @adapter: board private structure
- * @bit: bit to set
+ * iavf_lock_timeout - try to lock mutex but give up after timeout
+ * @lock: mutex that should be locked
  * @msecs: timeout in msecs
  *
  * Returns 0 on success, negative on failure
  **/
-static int iavf_lock_timeout(struct iavf_adapter *adapter,
-			     enum iavf_critical_section_t bit,
-			     unsigned int msecs)
+static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
 {
 	unsigned int wait, delay = 10;
 
 	for (wait = 0; wait < msecs; wait += delay) {
-		if (!test_and_set_bit(bit, &adapter->crit_section))
+		if (mutex_trylock(lock))
 			return 0;
 
 		msleep(delay);
@@ -1939,7 +1936,7 @@ static void iavf_watchdog_task(struct work_struct *work)
 	struct iavf_hw *hw = &adapter->hw;
 	u32 reg_val;
 
-	if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
+	if (!mutex_trylock(&adapter->crit_lock))
 		goto restart_watchdog;
 
 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
@@ -1957,8 +1954,7 @@ static void iavf_watchdog_task(struct work_struct *work)
 			adapter->state = __IAVF_STARTUP;
 			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
 			queue_delayed_work(iavf_wq, &adapter->init_task, 10);
-			clear_bit(__IAVF_IN_CRITICAL_TASK,
-				  &adapter->crit_section);
+			mutex_unlock(&adapter->crit_lock);
 			/* Don't reschedule the watchdog, since we've restarted
 			 * the init task. When init_task contacts the PF and
 			 * gets everything set up again, it'll restart the
@@ -1968,14 +1964,13 @@ static void iavf_watchdog_task(struct work_struct *work)
 		}
 		adapter->aq_required = 0;
 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-		clear_bit(__IAVF_IN_CRITICAL_TASK,
-			  &adapter->crit_section);
+		mutex_unlock(&adapter->crit_lock);
 		queue_delayed_work(iavf_wq,
 				   &adapter->watchdog_task,
 				   msecs_to_jiffies(10));
 		goto watchdog_done;
 	case __IAVF_RESETTING:
-		clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+		mutex_unlock(&adapter->crit_lock);
 		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
 		return;
 	case __IAVF_DOWN:
@@ -1998,7 +1993,7 @@ static void iavf_watchdog_task(struct work_struct *work)
 		}
 		break;
 	case __IAVF_REMOVE:
-		clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+		mutex_unlock(&adapter->crit_lock);
 		return;
 	default:
 		goto restart_watchdog;
@@ -2020,7 +2015,7 @@ watchdog_done:
 	if (adapter->state == __IAVF_RUNNING ||
 	    adapter->state == __IAVF_COMM_FAILED)
 		iavf_detect_recover_hung(&adapter->vsi);
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->crit_lock);
 restart_watchdog:
 	if (adapter->aq_required)
 		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
@@ -2084,7 +2079,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
 	memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
 	iavf_shutdown_adminq(&adapter->hw);
 	adapter->netdev->flags &= ~IFF_UP;
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->crit_lock);
 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
 	adapter->state = __IAVF_DOWN;
 	wake_up(&adapter->down_waitqueue);
@@ -2117,15 +2112,14 @@ static void iavf_reset_task(struct work_struct *work)
 	/* When device is being removed it doesn't make sense to run the reset
 	 * task, just return in such a case.
 	 */
-	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
+	if (mutex_is_locked(&adapter->remove_lock))
 		return;
 
-	if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200)) {
+	if (iavf_lock_timeout(&adapter->crit_lock, 200)) {
 		schedule_work(&adapter->reset_task);
 		return;
 	}
-	while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
-				&adapter->crit_section))
+	while (!mutex_trylock(&adapter->client_lock))
 		usleep_range(500, 1000);
 	if (CLIENT_ENABLED(adapter)) {
 		adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
@@ -2177,7 +2171,7 @@ static void iavf_reset_task(struct work_struct *work)
 		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
 			reg_val);
 		iavf_disable_vf(adapter);
-		clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
+		mutex_unlock(&adapter->client_lock);
 		return; /* Do not attempt to reinit. It's dead, Jim. */
 	}
 
@@ -2304,13 +2298,13 @@ continue_reset:
 		adapter->state = __IAVF_DOWN;
 		wake_up(&adapter->down_waitqueue);
 	}
-	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->client_lock);
+	mutex_unlock(&adapter->crit_lock);
 
 	return;
 reset_err:
-	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->client_lock);
+	mutex_unlock(&adapter->crit_lock);
 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
 	iavf_close(netdev);
 }
@@ -2338,7 +2332,7 @@ static void iavf_adminq_task(struct work_struct *work)
 	if (!event.msg_buf)
 		goto out;
 
-	if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200))
+	if (iavf_lock_timeout(&adapter->crit_lock, 200))
 		goto freedom;
 	do {
 		ret = iavf_clean_arq_element(hw, &event, &pending);
@@ -2353,7 +2347,7 @@ static void iavf_adminq_task(struct work_struct *work)
 		if (pending != 0)
 			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
 	} while (pending);
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->crit_lock);
 
 	if ((adapter->flags &
 	     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
@@ -2420,7 +2414,7 @@ static void iavf_client_task(struct work_struct *work)
 	 * later.
 	 */
 
-	if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
+	if (!mutex_trylock(&adapter->client_lock))
 		return;
 
 	if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
@@ -2443,7 +2437,7 @@ static void iavf_client_task(struct work_struct *work)
 		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
 	}
 out:
-	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->client_lock);
 }
 
 /**
@@ -3046,8 +3040,7 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
 	if (!filter)
 		return -ENOMEM;
 
-	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
-				&adapter->crit_section)) {
+	while (!mutex_trylock(&adapter->crit_lock)) {
 		if (--count == 0)
 			goto err;
 		udelay(1);
@@ -3078,7 +3071,7 @@ err:
 	if (err)
 		kfree(filter);
 
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->crit_lock);
 	return err;
 }
 
@@ -3225,8 +3218,7 @@ static int iavf_open(struct net_device *netdev)
 		return -EIO;
 	}
 
-	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
-				&adapter->crit_section))
+	while (!mutex_trylock(&adapter->crit_lock))
 		usleep_range(500, 1000);
 
 	if (adapter->state != __IAVF_DOWN) {
@@ -3261,7 +3253,7 @@ static int iavf_open(struct net_device *netdev)
 
 	iavf_irq_enable(adapter, true);
 
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->crit_lock);
 
 	return 0;
 
@@ -3273,7 +3265,7 @@ err_setup_rx:
 err_setup_tx:
 	iavf_free_all_tx_resources(adapter);
 err_unlock:
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->crit_lock);
 
 	return err;
 }
@@ -3297,8 +3289,7 @@ static int iavf_close(struct net_device *netdev)
 	if (adapter->state <= __IAVF_DOWN_PENDING)
 		return 0;
 
-	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
-				&adapter->crit_section))
+	while (!mutex_trylock(&adapter->crit_lock))
 		usleep_range(500, 1000);
 
 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
@@ -3309,7 +3300,7 @@ static int iavf_close(struct net_device *netdev)
 	adapter->state = __IAVF_DOWN_PENDING;
 	iavf_free_traffic_irqs(adapter);
 
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->crit_lock);
 
 	/* We explicitly don't free resources here because the hardware is
 	 * still active and can DMA into memory. Resources are cleared in
@@ -3658,8 +3649,8 @@ static void iavf_init_task(struct work_struct *work)
 						    init_task.work);
 	struct iavf_hw *hw = &adapter->hw;
 
-	if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000)) {
-		dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__);
+	if (iavf_lock_timeout(&adapter->crit_lock, 5000)) {
+		dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
 		return;
 	}
 	switch (adapter->state) {
@@ -3694,7 +3685,7 @@ init_failed:
 	}
 	queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
 out:
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->crit_lock);
 }
 
 /**
@@ -3711,12 +3702,12 @@ static void iavf_shutdown(struct pci_dev *pdev)
 	if (netif_running(netdev))
 		iavf_close(netdev);
 
-	if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
-		dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__);
+	if (iavf_lock_timeout(&adapter->crit_lock, 5000))
+		dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
 	/* Prevent the watchdog from running. */
 	adapter->state = __IAVF_REMOVE;
 	adapter->aq_required = 0;
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->crit_lock);
 
 #ifdef CONFIG_PM
 	pci_save_state(pdev);
@@ -3810,6 +3801,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* set up the locks for the AQ, do this only once in probe
 	 * and destroy them only once in remove
 	 */
+	mutex_init(&adapter->crit_lock);
+	mutex_init(&adapter->client_lock);
+	mutex_init(&adapter->remove_lock);
 	mutex_init(&hw->aq.asq_mutex);
 	mutex_init(&hw->aq.arq_mutex);
 
@@ -3861,8 +3855,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
 
 	netif_device_detach(netdev);
 
-	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
-				&adapter->crit_section))
+	while (!mutex_trylock(&adapter->crit_lock))
 		usleep_range(500, 1000);
 
 	if (netif_running(netdev)) {
@@ -3873,7 +3866,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
 	iavf_free_misc_irq(adapter);
 	iavf_reset_interrupt_capability(adapter);
 
-	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	mutex_unlock(&adapter->crit_lock);
 
 	return 0;
 }
@@ -3935,7 +3928,7 @@ static void iavf_remove(struct pci_dev *pdev)
 	struct iavf_hw *hw = &adapter->hw;
 	int err;
 	/* Indicate we are in remove and not to run reset_task */
-	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
+	mutex_lock(&adapter->remove_lock);
 	cancel_delayed_work_sync(&adapter->init_task);
 	cancel_work_sync(&adapter->reset_task);
 	cancel_delayed_work_sync(&adapter->client_task);
@@ -3957,8 +3950,8 @@ static void iavf_remove(struct pci_dev *pdev)
 		iavf_request_reset(adapter);
 		msleep(50);
 	}
-	if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
-		dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__);
+	if (iavf_lock_timeout(&adapter->crit_lock, 5000))
+		dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
 
 	/* Shut down all the garbage mashers on the detention level */
 	adapter->state = __IAVF_REMOVE;
@@ -3983,6 +3976,11 @@ static void iavf_remove(struct pci_dev *pdev)
 	/* destroy the locks only once, here */
 	mutex_destroy(&hw->aq.arq_mutex);
 	mutex_destroy(&hw->aq.asq_mutex);
+	mutex_destroy(&adapter->client_lock);
+	mutex_unlock(&adapter->crit_lock);
+	mutex_destroy(&adapter->crit_lock);
+	mutex_unlock(&adapter->remove_lock);
+	mutex_destroy(&adapter->remove_lock);
 
 	iounmap(hw->hw_addr);
 	pci_release_regions(pdev);