mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-27 14:43:58 +08:00
3310225dfc
PROP_MAX_SHIFT should be set to <=32 on a 64-bit box. This fixes two bugs in the lines below from bdi_dirty_limit(): bdi_dirty *= numerator; do_div(bdi_dirty, denominator); 1) divide error: do_div() only uses the lower 32 bits of the denominator, which may be truncated to 0 when PROP_MAX_SHIFT > 32. 2) overflow: (bdi_dirty * numerator) could easily overflow if numerator used up to 48 bits, leaving only 16 bits for bdi_dirty. Cc: <stable@kernel.org> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Reported-by: Ilya Tumaykin <librarian_rus@yahoo.com> Tested-by: Ilya Tumaykin <librarian_rus@yahoo.com> Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
137 lines
3.2 KiB
C
137 lines
3.2 KiB
C
/*
 * Floating proportions
 *
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This file contains the public data structure and API definitions.
 */
|
#ifndef _LINUX_PROPORTIONS_H
|
|
#define _LINUX_PROPORTIONS_H
|
|
|
|
#include <linux/percpu_counter.h>
|
|
#include <linux/spinlock.h>
|
|
#include <linux/mutex.h>
|
|
|
|
/*
 * Global state of one floating-proportion domain: the differentiation
 * period and the shared event counter all local counters are measured
 * against.
 */
struct prop_global {
	/*
	 * The period over which we differentiate
	 *
	 * period = 2^shift
	 */
	int shift;
	/*
	 * The total event counter aka 'time'.
	 *
	 * Treated as an unsigned long; the lower 'shift - 1' bits are the
	 * counter bits, the remaining upper bits the period counter.
	 */
	struct percpu_counter events;
};
|
|
|
|
/*
 * global proportion descriptor
 *
 * this is needed to consistently flip prop_global structures.
 */
struct prop_descriptor {
	int index;			/* which pg[] entry is current */
	struct prop_global pg[2];	/* double-buffered so a shift change can flip */
	struct mutex mutex;		/* serialize the prop_global switch */
};
|
|
|
|
/*
 * Initialize @pd with period 2^@shift.  Return value follows the usual
 * 0/-errno convention -- NOTE(review): confirm against lib/proportions.c,
 * the implementation is not visible here.
 */
int prop_descriptor_init(struct prop_descriptor *pd, int shift);
/* Switch the descriptor to a new period of 2^@new_shift. */
void prop_change_shift(struct prop_descriptor *pd, int new_shift);
|
|
|
|
/*
 * ----- PERCPU ------
 */
|
|
|
|
/*
 * Per-user local state, backed by a percpu counter for scalable updates.
 */
struct prop_local_percpu {
	/*
	 * the local events counter
	 */
	struct percpu_counter events;

	/*
	 * snapshot of the last seen global state
	 */
	int shift;
	unsigned long period;
	raw_spinlock_t lock;		/* protect the snapshot state */
};
|
|
|
|
/* Set up / tear down the percpu-backed local counter. */
int prop_local_init_percpu(struct prop_local_percpu *pl);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
/* Record one event for @pl; caller must have IRQs disabled (see prop_inc_percpu). */
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
/*
 * Obtain @pl's current proportion as *numerator / *denominator of the
 * global event rate tracked by @pd.
 */
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
		long *numerator, long *denominator);
|
|
|
|
/*
 * prop_inc_percpu - count one event against @pl within domain @pd
 *
 * IRQ-safe wrapper around __prop_inc_percpu(): the raw increment must run
 * with interrupts off, so disable them around the call and restore the
 * previous state afterwards.
 */
static inline
void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
	unsigned long irq_state;

	local_irq_save(irq_state);
	__prop_inc_percpu(pd, pl);
	local_irq_restore(irq_state);
}
|
|
|
|
/*
 * Limit the time part in order to ensure there are some bits left for the
 * cycle counter and fraction multiply.
 *
 * On 64-bit, PROP_MAX_SHIFT must stay <= 32: callers feed the resulting
 * numerator/denominator into do_div(), which only uses the low 32 bits of
 * the denominator, and a larger numerator also makes the fraction multiply
 * prone to overflow (see the changelog of this fix at the top of the file).
 */
#if BITS_PER_LONG == 32
#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
#else
#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
#endif

/* Fixed-point base used when expressing fractions of the period. */
#define PROP_FRAC_SHIFT		(BITS_PER_LONG - PROP_MAX_SHIFT - 1)
#define PROP_FRAC_BASE		(1UL << PROP_FRAC_SHIFT)
|
|
|
|
/*
 * Like __prop_inc_percpu(), but with @frac limiting how large a share of
 * the total @pl may take -- presumably a PROP_FRAC_BASE-relative cap;
 * NOTE(review): confirm semantics against lib/proportions.c.
 */
void __prop_inc_percpu_max(struct prop_descriptor *pd,
			   struct prop_local_percpu *pl, long frac);
|
|
|
|
|
|
/*
 * ----- SINGLE ------
 */
|
|
|
|
/*
 * Per-user local state, single-counter variant: a plain unsigned long
 * instead of a percpu counter, for users without percpu scalability needs.
 */
struct prop_local_single {
	/*
	 * the local events counter
	 */
	unsigned long events;

	/*
	 * snapshot of the last seen global state
	 * and a lock protecting this state
	 */
	unsigned long period;
	int shift;
	raw_spinlock_t lock;		/* protect the snapshot state */
};
|
|
|
|
/*
 * Static initializer for a struct prop_local_single: lock initialized,
 * all counters/snapshot fields implicitly zero.
 */
#define INIT_PROP_LOCAL_SINGLE(name)			\
{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
}
|
|
|
|
/* Set up / tear down the single-counter local state. */
int prop_local_init_single(struct prop_local_single *pl);
void prop_local_destroy_single(struct prop_local_single *pl);
/* Record one event for @pl; caller must have IRQs disabled (see prop_inc_single). */
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
/*
 * Obtain @pl's current proportion as *numerator / *denominator of the
 * global event rate tracked by @pd.
 */
void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
		long *numerator, long *denominator);
|
|
|
|
/*
 * prop_inc_single - count one event against @pl within domain @pd
 *
 * IRQ-safe wrapper around __prop_inc_single(): the raw increment must run
 * with interrupts off, so disable them around the call and restore the
 * previous state afterwards.
 */
static inline
void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
	unsigned long irq_state;

	local_irq_save(irq_state);
	__prop_inc_single(pd, pl);
	local_irq_restore(irq_state);
}
|
|
|
|
#endif /* _LINUX_PROPORTIONS_H */
|