
Merge git://github.com/davem330/net

* git://github.com/davem330/net:
  pch_gbe: Fixed the issue on which a network freezes
  pch_gbe: Fixed the issue on which PC was frozen when link was downed.
  make PACKET_STATISTICS getsockopt report consistently between ring and non-ring
  net: xen-netback: correctly restart Tx after a VM restore/migrate
  bonding: properly stop queuing work when requested
  can bcm: fix incomplete tx_setup fix
  RDSRDMA: Fix cleanup of rds_iw_mr_pool
  net: Documentation: Fix type of variables
  ibmveth: Fix oops on request_irq failure
  ipv6: nullify ipv6_ac_list and ipv6_fl_list when creating new socket
  cxgb4: Fix EEH on IBM P7IOC
  can bcm: fix tx_setup off-by-one errors
  MAINTAINERS: tehuti: Alexander Indenbaum's address bounces
  dp83640: reduce driver noise
  ptp: fix L2 event message recognition
commit 8a04b45367
Linus Torvalds, 2011-10-04 10:37:06 -07:00
15 changed files with 100 additions and 82 deletions

diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt

@@ -1042,7 +1042,7 @@ conf/interface/*:
 	The functional behaviour for certain settings is different
 	depending on whether local forwarding is enabled or not.
 
-accept_ra - BOOLEAN
+accept_ra - INTEGER
 	Accept Router Advertisements; autoconfigure using them.
 
 	Possible values are:
@@ -1106,7 +1106,7 @@ dad_transmits - INTEGER
 	The amount of Duplicate Address Detection probes to send.
 	Default: 1
 
-forwarding - BOOLEAN
+forwarding - INTEGER
 	Configure interface-specific Host/Router behaviour.
 
 	Note: It is recommended to have the same setting on all

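Note on the two hunks above: each of these sysctls has a "Possible values" list with three states (for accept_ra, the value 2 means "accept Router Advertisements even if forwarding is enabled"), so documenting them as BOOLEAN was simply wrong; the patch corrects only the documented type, not any behaviour.
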
diff --git a/MAINTAINERS b/MAINTAINERS

@@ -6374,7 +6374,6 @@ S: Supported
 F: arch/arm/mach-tegra
 
 TEHUTI ETHERNET DRIVER
-M: Alexander Indenbaum <baum@tehutinetworks.net>
 M: Andy Gospodarek <andy@greyhouse.net>
 L: netdev@vger.kernel.org
 S: Supported

diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c

@@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 	}
 
 re_arm:
-	queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
+	if (!bond->kill_timers)
+		queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
 out:
 	read_unlock(&bond->lock);
 }

diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c

@@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work)
 	}
 
 re_arm:
-	queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
+	if (!bond->kill_timers)
+		queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
 out:
 	read_unlock(&bond->lock);
 }

diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c

@@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
 
 	read_lock(&bond->lock);
 
+	if (bond->kill_timers)
+		goto out;
+
 	/* rejoin all groups on bond device */
 	__bond_resend_igmp_join_requests(bond->dev);
@@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
 			__bond_resend_igmp_join_requests(vlan_dev);
 	}
 
-	if (--bond->igmp_retrans > 0)
+	if ((--bond->igmp_retrans > 0) && !bond->kill_timers)
 		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
-
+out:
 	read_unlock(&bond->lock);
 }
@@ -2538,7 +2541,7 @@ void bond_mii_monitor(struct work_struct *work)
 	}
 
 re_arm:
-	if (bond->params.miimon)
+	if (bond->params.miimon && !bond->kill_timers)
 		queue_delayed_work(bond->wq, &bond->mii_work,
 				   msecs_to_jiffies(bond->params.miimon));
 out:
@@ -2886,7 +2889,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
 	}
 
 re_arm:
-	if (bond->params.arp_interval)
+	if (bond->params.arp_interval && !bond->kill_timers)
 		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 out:
 	read_unlock(&bond->lock);
@@ -3154,7 +3157,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
 	bond_ab_arp_probe(bond);
 
 re_arm:
-	if (bond->params.arp_interval)
+	if (bond->params.arp_interval && !bond->kill_timers)
 		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 out:
 	read_unlock(&bond->lock);

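All five bonding hunks apply one pattern: a self-re-arming work item must check bond->kill_timers before doing work and again before re-queuing itself, so that the close path can actually stop the machinery. A minimal sketch of the pattern, using hypothetical names (bond_like, do_monitoring, mon_work) rather than the driver's actual code:

#include <linux/workqueue.h>
#include <linux/spinlock.h>

/* Hypothetical container mirroring the relevant bonding fields. */
struct bond_like {
	rwlock_t lock;
	int kill_timers;		/* set by the close path */
	struct workqueue_struct *wq;
	struct delayed_work mon_work;
	unsigned long interval;		/* re-arm period in jiffies */
};

static void do_monitoring(struct bond_like *b) { /* placeholder */ }

static void monitor_fn(struct work_struct *work)
{
	struct bond_like *b =
		container_of(work, struct bond_like, mon_work.work);

	read_lock(&b->lock);
	if (b->kill_timers)
		goto out;		/* shutting down: no work, no re-arm */

	do_monitoring(b);

	if (!b->kill_timers)		/* re-check before re-arming */
		queue_delayed_work(b->wq, &b->mon_work, b->interval);
out:
	read_unlock(&b->lock);
}
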
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c

@@ -3712,6 +3712,9 @@ static int __devinit init_one(struct pci_dev *pdev,
 		setup_debugfs(adapter);
 	}
 
+	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+	pdev->needs_freset = 1;
+
 	if (is_offload(adapter))
 		attach_ulds(adapter);

diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c

@@ -636,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev)
 		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
 			   netdev->irq, rc);
 		do {
-			rc = h_free_logical_lan(adapter->vdev->unit_address);
-		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
+		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
 
 		goto err_out;
 	}

diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c

@@ -1199,6 +1199,8 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
 			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
 				  &hw->reg->INT_EN);
 			pch_gbe_stop_receive(adapter);
+			int_st |= ioread32(&hw->reg->INT_ST);
+			int_st = int_st & ioread32(&hw->reg->INT_EN);
 		}
 		if (int_st & PCH_GBE_INT_RX_DMA_ERR)
 			adapter->stats.intr_rx_dma_err_count++;
@@ -1218,14 +1220,11 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
 			/* Set Pause packet */
 			pch_gbe_mac_set_pause_packet(hw);
 		}
-		if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
-		    == 0) {
-			return IRQ_HANDLED;
-		}
 	}
 
 	/* When request status is Receive interruption */
-	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
+	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
+	    (adapter->rx_stop_flag == true)) {
 		if (likely(napi_schedule_prep(&adapter->napi))) {
 			/* Enable only Rx Descriptor empty */
 			atomic_inc(&adapter->irq_sem);
@@ -1385,7 +1384,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
 	struct sk_buff *skb;
 	unsigned int i;
 	unsigned int cleaned_count = 0;
-	bool cleaned = false;
+	bool cleaned = true;
 
 	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
@@ -1396,7 +1395,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
 	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
 		pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
-		cleaned = true;
 		buffer_info = &tx_ring->buffer_info[i];
 		skb = buffer_info->skb;
@@ -1439,8 +1437,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
 		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
 
 		/* weight of a sort for tx, to avoid endless transmit cleanup */
-		if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
+		if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
+			cleaned = false;
 			break;
+		}
 	}
 	pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
 		 cleaned_count);
@@ -2168,7 +2168,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 {
 	struct pch_gbe_adapter *adapter =
 	    container_of(napi, struct pch_gbe_adapter, napi);
-	struct net_device *netdev = adapter->netdev;
 	int work_done = 0;
 	bool poll_end_flag = false;
 	bool cleaned = false;
@@ -2176,33 +2175,32 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 	pr_debug("budget : %d\n", budget);
 
-	/* Keep link state information with original netdev */
-	if (!netif_carrier_ok(netdev)) {
+	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
+
+	if (!cleaned)
+		work_done = budget;
+	/* If no Tx and not enough Rx work done,
+	 * exit the polling mode
+	 */
+	if (work_done < budget)
 		poll_end_flag = true;
-	} else {
-		pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+
+	if (poll_end_flag) {
+		napi_complete(napi);
+		if (adapter->rx_stop_flag) {
+			adapter->rx_stop_flag = false;
+			pch_gbe_start_receive(&adapter->hw);
+		}
+		pch_gbe_irq_enable(adapter);
+	} else
 		if (adapter->rx_stop_flag) {
 			adapter->rx_stop_flag = false;
 			pch_gbe_start_receive(&adapter->hw);
 			int_en = ioread32(&adapter->hw.reg->INT_EN);
 			iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
 				  &adapter->hw.reg->INT_EN);
 		}
-		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
-		if (cleaned)
-			work_done = budget;
-		/* If no Tx and not enough Rx work done,
-		 * exit the polling mode
-		 */
-		if ((work_done < budget) || !netif_running(netdev))
-			poll_end_flag = true;
-	}
-
-	if (poll_end_flag) {
-		napi_complete(napi);
-		pch_gbe_irq_enable(adapter);
-	}
 
 	pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
 		 poll_end_flag, work_done, budget);

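The pch_gbe_napi_poll() rework above brings the driver in line with the usual NAPI contract: clean Rx up to the budget, claim the whole budget while the Tx ring is still dirty, and only call napi_complete() and re-enable interrupts once work_done falls below budget. A generic sketch of that contract, with hypothetical helpers (my_clean_rx, my_clean_tx, my_irq_enable) standing in for the driver's routines:

#include <linux/netdevice.h>

struct my_adapter { struct napi_struct napi; /* ... */ };

static void my_clean_rx(struct my_adapter *ad, int *work_done, int budget) { }
static bool my_clean_tx(struct my_adapter *ad) { return true; }
static void my_irq_enable(struct my_adapter *ad) { }

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_adapter *ad = container_of(napi, struct my_adapter, napi);
	int work_done = 0;

	my_clean_rx(ad, &work_done, budget);
	if (!my_clean_tx(ad))		/* Tx ring not fully cleaned yet */
		work_done = budget;	/* claim the full budget: keep polling */

	if (work_done < budget) {	/* device idle: leave polling mode */
		napi_complete(napi);
		my_irq_enable(ad);	/* only now re-enable interrupts */
	}
	return work_done;
}
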
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c

@@ -589,7 +589,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
 	prune_rx_ts(dp83640);
 
 	if (list_empty(&dp83640->rxpool)) {
-		pr_warning("dp83640: rx timestamp pool is empty\n");
+		pr_debug("dp83640: rx timestamp pool is empty\n");
 		goto out;
 	}
 	rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@ -612,7 +612,7 @@ static void decode_txts(struct dp83640_private *dp83640,
 
 	skb = skb_dequeue(&dp83640->tx_queue);
 	if (!skb) {
-		pr_warning("dp83640: have timestamp but tx_queue empty\n");
+		pr_debug("dp83640: have timestamp but tx_queue empty\n");
 		return;
 	}
 	ns = phy2txts(phy_txts);

diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c

@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	xenvif_get(vif);
 
 	rtnl_lock();
-	if (netif_running(vif->dev))
-		xenvif_up(vif);
 	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
 		dev_set_mtu(vif->dev, ETH_DATA_LEN);
 	netdev_update_features(vif->dev);
 	netif_carrier_on(vif->dev);
+	if (netif_running(vif->dev))
+		xenvif_up(vif);
 	rtnl_unlock();
 
 	return 0;

diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h

@@ -51,6 +51,7 @@
 #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
 
 #define PTP_EV_PORT 319
+#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
 
 #define OFF_ETYPE 12
 #define OFF_IHL   14
@@ -116,14 +117,20 @@ static inline int ptp_filter_init(struct sock_filter *f, int len)
 	{OP_OR,    0,  0, PTP_CLASS_IPV6        }, /*              */ \
 	{OP_RETA,  0,  0, 0                     }, /*              */ \
/*L3x*/	{OP_RETK,  0,  0, PTP_CLASS_NONE        }, /*              */ \
-/*L40*/	{OP_JEQ,   0,  6, ETH_P_8021Q           }, /* f goto L50   */ \
+/*L40*/	{OP_JEQ,   0,  9, ETH_P_8021Q           }, /* f goto L50   */ \
 	{OP_LDH,   0,  0, OFF_ETYPE + 4         }, /*              */ \
-	{OP_JEQ,   0,  9, ETH_P_1588            }, /* f goto L60   */ \
+	{OP_JEQ,   0, 15, ETH_P_1588            }, /* f goto L60   */ \
+	{OP_LDB,   0,  0, ETH_HLEN + VLAN_HLEN  }, /*              */ \
+	{OP_AND,   0,  0, PTP_GEN_BIT           }, /*              */ \
+	{OP_JEQ,   0, 12, 0                     }, /* f goto L6x   */ \
 	{OP_LDH,   0,  0, ETH_HLEN + VLAN_HLEN  }, /*              */ \
 	{OP_AND,   0,  0, PTP_CLASS_VMASK       }, /*              */ \
 	{OP_OR,    0,  0, PTP_CLASS_VLAN        }, /*              */ \
 	{OP_RETA,  0,  0, 0                     }, /*              */ \
-/*L50*/	{OP_JEQ,   0,  4, ETH_P_1588            }, /* f goto L61   */ \
+/*L50*/	{OP_JEQ,   0,  7, ETH_P_1588            }, /* f goto L61   */ \
+	{OP_LDB,   0,  0, ETH_HLEN              }, /*              */ \
+	{OP_AND,   0,  0, PTP_GEN_BIT           }, /*              */ \
+	{OP_JEQ,   0,  4, 0                     }, /* f goto L6x   */ \
 	{OP_LDH,   0,  0, ETH_HLEN              }, /*              */ \
 	{OP_AND,   0,  0, PTP_CLASS_VMASK       }, /*              */ \
 	{OP_OR,    0,  0, PTP_CLASS_L2          }, /*              */ \

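Background on the filter change: PTP event messages (the ones that need timestamps, e.g. Sync and Delay_Req) have the 0x08 bit of the messageType byte clear, while general messages (e.g. Follow_Up, Announce) have it set. The added OP_LDB/OP_AND/OP_JEQ triples load the first byte of the PTP header and bail out for general messages, which the old L2 filter wrongly classified as events. The same test restated in plain C, as a sketch (helper name hypothetical):

#include <stdint.h>

#define PTP_GEN_BIT 0x08	/* set in the messageType of general messages */

/* ptp_hdr points at the first byte of the PTP header, i.e. just past the
 * Ethernet header (and the VLAN tag, if present). */
static int is_ptp_event_message(const uint8_t *ptp_hdr)
{
	return (ptp_hdr[0] & PTP_GEN_BIT) == 0;	/* event iff the bit is clear */
}
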
diff --git a/net/can/bcm.c b/net/can/bcm.c

@@ -344,6 +344,18 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
 	}
 }
 
+static void bcm_tx_start_timer(struct bcm_op *op)
+{
+	if (op->kt_ival1.tv64 && op->count)
+		hrtimer_start(&op->timer,
+			      ktime_add(ktime_get(), op->kt_ival1),
+			      HRTIMER_MODE_ABS);
+	else if (op->kt_ival2.tv64)
+		hrtimer_start(&op->timer,
+			      ktime_add(ktime_get(), op->kt_ival2),
+			      HRTIMER_MODE_ABS);
+}
+
 static void bcm_tx_timeout_tsklet(unsigned long data)
 {
 	struct bcm_op *op = (struct bcm_op *)data;
@@ -365,26 +377,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
 			bcm_send_to_user(op, &msg_head, NULL, 0);
 		}
-	}
-
-	if (op->kt_ival1.tv64 && (op->count > 0)) {
-
-		/* send (next) frame */
 		bcm_can_tx(op);
-		hrtimer_start(&op->timer,
-			      ktime_add(ktime_get(), op->kt_ival1),
-			      HRTIMER_MODE_ABS);
 
-	} else {
-		if (op->kt_ival2.tv64) {
+	} else if (op->kt_ival2.tv64)
+		bcm_can_tx(op);
 
-			/* send (next) frame */
-			bcm_can_tx(op);
-			hrtimer_start(&op->timer,
-				      ktime_add(ktime_get(), op->kt_ival2),
-				      HRTIMER_MODE_ABS);
-		}
-	}
+	bcm_tx_start_timer(op);
 }
@@ -964,23 +962,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 			hrtimer_cancel(&op->timer);
 	}
 
-	if ((op->flags & STARTTIMER) &&
-	    ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
-
+	if (op->flags & STARTTIMER) {
+		hrtimer_cancel(&op->timer);
 		/* spec: send can_frame when starting timer */
 		op->flags |= TX_ANNOUNCE;
-
-		if (op->kt_ival1.tv64 && (op->count > 0)) {
-			/* op->count-- is done in bcm_tx_timeout_handler */
-			hrtimer_start(&op->timer, op->kt_ival1,
-				      HRTIMER_MODE_REL);
-		} else
-			hrtimer_start(&op->timer, op->kt_ival2,
-				      HRTIMER_MODE_REL);
 	}
 
-	if (op->flags & TX_ANNOUNCE)
+	if (op->flags & TX_ANNOUNCE) {
 		bcm_can_tx(op);
+		if (op->count)
+			op->count--;
+	}
+
+	if (op->flags & STARTTIMER)
+		bcm_tx_start_timer(op);
 
 	return msg_head->nframes * CFSIZ + MHSIZ;
 }

diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c

@@ -1383,6 +1383,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
 #endif
 
+		newnp->ipv6_ac_list = NULL;
+		newnp->ipv6_fl_list = NULL;
 		newnp->pktoptions  = NULL;
 		newnp->opt	   = NULL;
 		newnp->mcast_oif   = inet6_iif(skb);
@@ -1447,6 +1449,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	   First: no IPv4 options.
 	 */
 	newinet->inet_opt = NULL;
+	newnp->ipv6_ac_list = NULL;
 	newnp->ipv6_fl_list = NULL;
 
 	/* Clone RX bits */

diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c

@@ -961,7 +961,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	return 0;
 
 drop_n_acct:
-	po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
+	spin_lock(&sk->sk_receive_queue.lock);
+	po->stats.tp_drops++;
+	atomic_inc(&sk->sk_drops);
+	spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
 	if (skb_head != skb->data && skb_shared(skb)) {

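Why the lock matters here: the ring-buffer receive path and the PACKET_STATISTICS getsockopt handler already update and snapshot po->stats under sk_receive_queue.lock, but the non-ring drop path bumped tp_drops with a bare atomic_inc_return(), so a reader could see tp_packets and tp_drops from different instants. A sketch of the snapshot side this protects, assuming the af_packet-internal types (not the kernel's exact code):

/* packet_sock is private to net/packet/af_packet.c; tpacket_stats comes from
 * <linux/if_packet.h>. getsockopt(PACKET_STATISTICS) snapshots and resets
 * both counters in one critical section, so the producer side must bump
 * them under the same lock. */
static void stats_snapshot(struct packet_sock *po, struct tpacket_stats *st)
{
	struct sock *sk = &po->sk;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	*st = po->stats;			/* read both counters together */
	memset(&po->stats, 0, sizeof(po->stats));	/* reset on read */
	spin_unlock_bh(&sk->sk_receive_queue.lock);
}
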
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c

@@ -84,7 +84,8 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
 			struct list_head *unmap_list,
-			struct list_head *kill_list);
+			struct list_head *kill_list,
+			int *unpinned);
 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 
 static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
@@ -499,7 +500,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
 	LIST_HEAD(unmap_list);
 	LIST_HEAD(kill_list);
 	unsigned long flags;
-	unsigned int nfreed = 0, ncleaned = 0, free_goal;
+	unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal;
 	int ret = 0;
 
 	rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);
@@ -524,7 +525,8 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
 	 * will be destroyed by the unmap function.
 	 */
 	if (!list_empty(&unmap_list)) {
-		ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list);
+		ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
+						     &kill_list, &unpinned);
 		/* If we've been asked to destroy all MRs, move those
 		 * that were simply cleaned to the kill list */
 		if (free_all)
@@ -548,6 +550,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
 		spin_unlock_irqrestore(&pool->list_lock, flags);
 	}
 
+	atomic_sub(unpinned, &pool->free_pinned);
 	atomic_sub(ncleaned, &pool->dirty_count);
 	atomic_sub(nfreed, &pool->item_count);
 
@@ -828,7 +831,8 @@ static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
 				struct list_head *unmap_list,
-				struct list_head *kill_list)
+				struct list_head *kill_list,
+				int *unpinned)
 {
 	struct rds_iw_mapping *mapping, *next;
 	unsigned int ncleaned = 0;
@@ -855,6 +859,7 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
 
 	spin_lock_irqsave(&pool->list_lock, flags);
 	list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
+		*unpinned += mapping->m_sg.len;
 		list_move(&mapping->m_list, &laundered);
 		ncleaned++;
 	}