
[PATCH] ppc64: Simplify counting of lpevents, remove lpevent_count from paca

Currently there's a per-cpu count of lpevents processed, a per-queue (i.e.
global) total count, and a count by event type.

Replace all that with a count by event type for each cpu. We only need to
add it up in the proc code.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
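
For reference, a minimal sketch of the per-cpu counting pattern the patch
moves to, using the 2.6-era per-cpu helpers. The count_event() and
total_events() wrappers are illustrative names, not part of the patch; only
the per-cpu array itself mirrors the code in the diff below.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/iSeries/HvLpEvent.h>

DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);

/* Hot path: bump only the local CPU's counter, no shared cache line. */
static inline void count_event(unsigned int type)
{
	__get_cpu_var(hvlpevent_counts)[type]++;
}

/* Slow path (/proc read): fold the per-cpu counters into one total. */
static unsigned long total_events(void)
{
	unsigned long sum = 0;
	int cpu, i;

	for_each_online_cpu(cpu)
		for (i = 0; i < HvLpEvent_Type_NumTypes; i++)
			sum += per_cpu(hvlpevent_counts, cpu)[i];

	return sum;
}

Summing only in the read path keeps the event path to a single local
increment; the totals are merely a snapshot while events are in flight,
which the FIXME in the proc code below acknowledges.
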
Michael Ellerman 2005-06-30 15:16:09 +10:00 committed by Paul Mackerras
parent 74889802a1
commit ed094150bd
3 changed files with 29 additions and 18 deletions
arch/ppc64/kernel
include/asm-ppc64


@@ -28,7 +28,9 @@
  */
 struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
 
-static char *event_types[9] = {
+DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
+
+static char *event_types[HvLpEvent_Type_NumTypes] = {
 	"Hypervisor\t\t",
 	"Machine Facilities\t",
 	"Session Manager\t",
@@ -129,7 +131,6 @@ static void hvlpevent_clear_valid( struct HvLpEvent * event )
 void process_hvlpevents(struct pt_regs *regs)
 {
-	unsigned numIntsProcessed = 0;
 	struct HvLpEvent * nextLpEvent;
 
 	/* If we have recursed, just return */
@@ -144,8 +145,6 @@ void process_hvlpevents(struct pt_regs *regs)
 	for (;;) {
 		nextLpEvent = get_next_hvlpevent();
 		if ( nextLpEvent ) {
-			++numIntsProcessed;
-			hvlpevent_queue.xLpIntCount++;
 			/* Call appropriate handler here, passing
 			 * a pointer to the LpEvent. The handler
 			 * must make a copy of the LpEvent if it
@@ -160,7 +159,7 @@
 			 * here!
 			 */
 			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
-				hvlpevent_queue.xLpIntCountByType[nextLpEvent->xType]++;
+				__get_cpu_var(hvlpevent_counts)[nextLpEvent->xType]++;
 			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
 					lpEventHandler[nextLpEvent->xType] )
 				lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
@@ -181,8 +180,6 @@ void process_hvlpevents(struct pt_regs *regs)
 	ItLpQueueInProcess = 0;
 	mb();
 	clear_inUse();
-
-	get_paca()->lpevent_count += numIntsProcessed;
 }
 
 static int set_spread_lpevents(char *str)
@@ -228,20 +225,37 @@ void setup_hvlpevent_queue(void)
 static int proc_lpevents_show(struct seq_file *m, void *v)
 {
-	unsigned int i;
+	int cpu, i;
+	unsigned long sum;
+	static unsigned long cpu_totals[NR_CPUS];
+
+	/* FIXME: do we care that there's no locking here? */
+	sum = 0;
+	for_each_online_cpu(cpu) {
+		cpu_totals[cpu] = 0;
+		for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
+			cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
+		}
+		sum += cpu_totals[cpu];
+	}
 
 	seq_printf(m, "LpEventQueue 0\n");
-	seq_printf(m, " events processed:\t%lu\n",
-		   (unsigned long)hvlpevent_queue.xLpIntCount);
+	seq_printf(m, " events processed:\t%lu\n", sum);
 
-	for (i = 0; i < 9; ++i)
-		seq_printf(m, " %s %10lu\n", event_types[i],
-			   (unsigned long)hvlpevent_queue.xLpIntCountByType[i]);
+	for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
+		sum = 0;
+		for_each_online_cpu(cpu) {
+			sum += per_cpu(hvlpevent_counts, cpu)[i];
+		}
+
+		seq_printf(m, " %s %10lu\n", event_types[i], sum);
+	}
 
 	seq_printf(m, "\n events processed by processor:\n");
 
-	for_each_online_cpu(i)
-		seq_printf(m, " CPU%02d %10u\n", i, paca[i].lpevent_count);
+	for_each_online_cpu(cpu) {
+		seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]);
+	}
 
 	return 0;
 }


@@ -70,8 +70,6 @@ struct hvlpevent_queue {
 	u8	xIndex;			// 0x28 unique sequential index.
 	u8	xSlicRsvd[3];		// 0x29-2b
 	u32	xInUseWord;		// 0x2C
-	u64	xLpIntCount;		// 0x30 Total Lp Int msgs processed
-	u64	xLpIntCountByType[9];	// 0x38-0x7F Event counts by type
 };
 
 extern struct hvlpevent_queue hvlpevent_queue;


@@ -89,7 +89,6 @@ struct paca_struct {
 	u64 next_jiffy_update_tb;	/* TB value for next jiffy update */
 	u64 saved_r1;			/* r1 save for RTAS calls */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
-	u32 lpevent_count;		/* lpevents processed */
 	u8 proc_enabled;		/* irq soft-enable flag */
 
 	/* not yet used */