mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-20 10:44:23 +08:00
3fc498f165
We have lots of infrastructure in place to partition multi-core systems such that we have a group of CPUs that are dedicated to a specific task: cgroups, scheduler and interrupt affinity, and the cpuisol= boot parameter. Still, kernel code will at times interrupt all CPUs in the system via IPIs for various needs. These IPIs are useful and cannot be avoided altogether, but in certain cases it is possible to interrupt only specific CPUs that have useful work to do and not the entire system. This patch set, inspired by discussions with Peter Zijlstra and Frederic Weisbecker when testing the nohz task patch set, is a first stab at trying to explore doing this by locating the places where such global IPI calls are being made and turning the global IPI into an IPI for a specific group of CPUs. The purpose of the patch set is to get feedback on whether this is the right way to go for dealing with this issue and indeed, whether the issue is even worth dealing with at all. Based on the feedback from this patch set I plan to offer further patches that address the similar issue in other code paths. This patch creates an on_each_cpu_mask() and on_each_cpu_cond() infrastructure API (the former derived from existing arch-specific versions in Tile and Arm) and uses them to turn several global IPI invocations into per-CPU-group invocations. Core kernel: on_each_cpu_mask() calls a function on processors specified by a cpumask, which may or may not include the local processor. You must not call this function with disabled interrupts or from a hardware interrupt handler or from a bottom half handler. arch/arm: Note that the generic version is a little different than the Arm one: 1. It has the mask as the first parameter 2. It calls the function on the calling CPU with interrupts disabled, but this should be OK since the function is called on the other CPUs with interrupts disabled anyway.
arch/tile: The API is the same as the tile-private one, but the generic version also calls the function on the calling CPU with interrupts disabled in the UP case. This is OK since the function is called on the other CPUs with interrupts disabled. Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com> Reviewed-by: Christoph Lameter <cl@linux.com> Acked-by: Chris Metcalf <cmetcalf@tilera.com> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Russell King <linux@arm.linux.org.uk> Cc: Pekka Enberg <penberg@kernel.org> Cc: Matt Mackall <mpm@selenic.com> Cc: Rik van Riel <riel@redhat.com> Cc: Andi Kleen <andi@firstfloor.org> Cc: Sasha Levin <levinsasha928@gmail.com> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Avi Kivity <avi@redhat.com> Acked-by: Michal Nazarewicz <mina86@mina86.org> Cc: Kosaki Motohiro <kosaki.motohiro@gmail.com> Cc: Milton Miller <miltonm@bga.com> Cc: Russell King <linux@arm.linux.org.uk> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
141 lines
4.0 KiB
C
141 lines
4.0 KiB
C
/*
|
|
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation, version 2.
|
|
*
|
|
* This program is distributed in the hope that it will be useful, but
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
* NON INFRINGEMENT. See the GNU General Public License for
|
|
* more details.
|
|
*/
|
|
|
|
#ifndef _ASM_TILE_SMP_H
|
|
#define _ASM_TILE_SMP_H
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
#include <asm/processor.h>
|
|
#include <linux/cpumask.h>
|
|
#include <linux/irqreturn.h>
|
|
#include <hv/hypervisor.h>
|
|
|
|
/* Set up this tile to support receiving hypervisor messages */
|
|
void init_messaging(void);
|
|
|
|
/* Set up this tile to support receiving device interrupts and IPIs. */
|
|
void init_per_tile_IRQs(void);
|
|
|
|
/* Send a message to processors specified in mask */
|
|
void send_IPI_many(const struct cpumask *mask, int tag);
|
|
|
|
/* Send a message to all but the sending processor */
|
|
void send_IPI_allbutself(int tag);
|
|
|
|
/* Send a message to a specific processor */
|
|
void send_IPI_single(int dest, int tag);
|
|
|
|
/* Process an IPI message */
|
|
void evaluate_message(int tag);
|
|
|
|
/* Boot a secondary cpu */
|
|
void online_secondary(void);
|
|
|
|
/* Topology of the supervisor tile grid, and coordinates of boot processor */
|
|
extern HV_Topology smp_topology;
|
|
|
|
/* Accessors for grid size */
|
|
#define smp_height (smp_topology.height)
|
|
#define smp_width (smp_topology.width)
|
|
|
|
/* Convenience functions for converting cpu <-> coords. */
|
|
static inline int cpu_x(int cpu)
|
|
{
|
|
return cpu % smp_width;
|
|
}
|
|
static inline int cpu_y(int cpu)
|
|
{
|
|
return cpu / smp_width;
|
|
}
|
|
static inline int xy_to_cpu(int x, int y)
|
|
{
|
|
return y * smp_width + x;
|
|
}
|
|
|
|
/* Hypervisor message tags sent via the tile send_IPI*() routines. */
|
|
#define MSG_TAG_START_CPU 1
|
|
#define MSG_TAG_STOP_CPU 2
|
|
#define MSG_TAG_CALL_FUNCTION_MANY 3
|
|
#define MSG_TAG_CALL_FUNCTION_SINGLE 4
|
|
|
|
/* Hook for the generic smp_call_function_many() routine. */
|
|
static inline void arch_send_call_function_ipi_mask(struct cpumask *mask)
|
|
{
|
|
send_IPI_many(mask, MSG_TAG_CALL_FUNCTION_MANY);
|
|
}
|
|
|
|
/* Hook for the generic smp_call_function_single() routine. */
|
|
static inline void arch_send_call_function_single_ipi(int cpu)
|
|
{
|
|
send_IPI_single(cpu, MSG_TAG_CALL_FUNCTION_SINGLE);
|
|
}
|
|
|
|
/* Print out the boot string describing which cpus were disabled. */
|
|
void print_disabled_cpus(void);
|
|
|
|
#else /* !CONFIG_SMP */
|
|
|
|
#define smp_master_cpu 0
|
|
#define smp_height 1
|
|
#define smp_width 1
|
|
#define cpu_x(cpu) 0
|
|
#define cpu_y(cpu) 0
|
|
#define xy_to_cpu(x, y) 0
|
|
|
|
#endif /* !CONFIG_SMP */
|
|
|
|
|
|
/* Which cpus may be used as the lotar in a page table entry. */
|
|
extern struct cpumask cpu_lotar_map;
|
|
#define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map)
|
|
|
|
#if CHIP_HAS_CBOX_HOME_MAP()
|
|
/* Which processors are used for hash-for-home mapping */
|
|
extern struct cpumask hash_for_home_map;
|
|
#endif
|
|
|
|
/* Which cpus can have their cache flushed by hv_flush_remote(). */
|
|
extern struct cpumask cpu_cacheable_map;
|
|
#define cpu_cacheable(cpu) cpumask_test_cpu((cpu), &cpu_cacheable_map)
|
|
|
|
/* Convert an HV_LOTAR value into a cpu. */
|
|
/*
 * Convert an HV_LOTAR value into a cpu number: unpack the x/y grid
 * coordinates and linearize them row-major across the tile grid.
 */
static inline int hv_lotar_to_cpu(HV_LOTAR lotar)
{
	int x = HV_LOTAR_X(lotar);
	int y = HV_LOTAR_Y(lotar);

	return y * smp_width + x;
}
|
|
|
|
/*
|
|
* Extension of <linux/cpumask.h> functionality when you just want
|
|
* to express a mask or suppression or inclusion region without
|
|
* being too concerned about exactly which cpus are valid in that region.
|
|
*/
|
|
int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits);
|
|
|
|
/*
 * Parse a cpu list from 'buf' into 'dst', cropping the result to
 * NR_CPUS rather than failing on out-of-range cpus.
 */
#define cpulist_parse_crop(buf, dst) \
			__cpulist_parse_crop((buf), (dst), NR_CPUS)
static inline int __cpulist_parse_crop(const char *buf, struct cpumask *dstp,
				       int nbits)
{
	/* Hand the raw bitmap of the cpumask to the cropping parser. */
	return bitmap_parselist_crop(buf, cpumask_bits(dstp), nbits);
}
|
|
|
|
/* Initialize the IPI subsystem. */
|
|
void ipi_init(void);
|
|
|
|
/* Function for start-cpu message to cause us to jump to. */
|
|
extern unsigned long start_cpu_function_addr;
|
|
|
|
#endif /* _ASM_TILE_SMP_H */
|