
[PATCH] bcm43xx: remove badness variable and related routine

When the periodic work function in bcm43xx was converted to use voluntary preemption
to reduce latency, a new function was created to estimate the "badness" of
each step; that quantity determined whether preemption should be enabled
while the periodic work ran. This concept was quite useful while the
periodic work was being debugged. Now that the routine appears to be
working correctly, it is time to simplify the code. This patch keeps the
functionality intact but simplifies the code.

Signed-off-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Author: Larry Finger 2006-11-01 18:11:18 -06:00, committed by Jeff Garzik
parent 1494a81410
commit 08c3103a56


@@ -3209,55 +3209,27 @@ static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm)
 static void do_periodic_work(struct bcm43xx_private *bcm)
 {
-	unsigned int state;
-
-	state = bcm->periodic_state;
-	if (state % 8 == 0)
+	if (bcm->periodic_state % 8 == 0)
 		bcm43xx_periodic_every120sec(bcm);
-	if (state % 4 == 0)
+	if (bcm->periodic_state % 4 == 0)
 		bcm43xx_periodic_every60sec(bcm);
-	if (state % 2 == 0)
+	if (bcm->periodic_state % 2 == 0)
 		bcm43xx_periodic_every30sec(bcm);
-	if (state % 1 == 0)
-		bcm43xx_periodic_every15sec(bcm);
-	bcm->periodic_state = state + 1;
+	bcm43xx_periodic_every15sec(bcm);
 
 	schedule_delayed_work(&bcm->periodic_work, HZ * 15);
 }
 
-/* Estimate a "Badness" value based on the periodic work
- * state-machine state. "Badness" is worse (bigger), if the
- * periodic work will take longer.
- */
-static int estimate_periodic_work_badness(unsigned int state)
-{
-	int badness = 0;
-
-	if (state % 8 == 0)	/* every 120 sec */
-		badness += 10;
-	if (state % 4 == 0)	/* every 60 sec */
-		badness += 5;
-	if (state % 2 == 0)	/* every 30 sec */
-		badness += 1;
-	if (state % 1 == 0)	/* every 15 sec */
-		badness += 1;
-
-#define BADNESS_LIMIT	4
-	return badness;
-}
-
 static void bcm43xx_periodic_work_handler(void *d)
 {
 	struct bcm43xx_private *bcm = d;
 	struct net_device *net_dev = bcm->net_dev;
 	unsigned long flags;
 	u32 savedirqs = 0;
-	int badness;
 	unsigned long orig_trans_start = 0;
 
 	mutex_lock(&bcm->mutex);
-	badness = estimate_periodic_work_badness(bcm->periodic_state);
-	if (badness > BADNESS_LIMIT) {
+	if (unlikely(bcm->periodic_state % 4 == 0)) {
 		/* Periodic work will take a long time, so we want it to
 		 * be preemptible.
 		 */
@@ -3289,7 +3261,7 @@ static void bcm43xx_periodic_work_handler(void *d)
 
 	do_periodic_work(bcm);
 
-	if (badness > BADNESS_LIMIT) {
+	if (unlikely(bcm->periodic_state % 4 == 0)) {
 		spin_lock_irqsave(&bcm->irq_lock, flags);
 		tasklet_enable(&bcm->isr_tasklet);
 		bcm43xx_interrupt_enable(bcm, savedirqs);
@@ -3300,6 +3272,7 @@ static void bcm43xx_periodic_work_handler(void *d)
 		net_dev->trans_start = orig_trans_start;
 	}
 	mmiowb();
+	bcm->periodic_state++;
 	spin_unlock_irqrestore(&bcm->irq_lock, flags);
 	mutex_unlock(&bcm->mutex);
 }