Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] fix personality(PER_LINUX32) performance issue
  [IA64] Properly unregister legacy interrupts
  [IA64] Remove NULL pointer check for argument never passed as NULL.
  [IA64] trivial cleanup for perfmon.c
  [IA64] trivial cleanup for entry.S
  [IA64] fix interrupt masking for pending works on kernel leave
  [IA64] allow user to force_pal_cache_flush
  [IA64] Don't reserve crashkernel memory > 4 GB
  [IA64] machvec support for SGI UV platform
  [IA64] Add header files for SGI UV platform
commit 7371fd11a6
@@ -686,6 +686,12 @@ and is between 256 and 4096 characters. It is defined in the file

	floppy=		[HW]
			See Documentation/floppy.txt.

	force_pal_cache_flush
			[IA-64] Avoid check_sal_cache_flush which may hang on
			buggy SAL_CACHE_FLUSH implementations. Using this
			parameter will force ia64_sal_cache_flush to call
			ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.

	gamecon.map[2|3]=
			[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
			support via parallel port (up to 5 devices per port)
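For illustration only, the new parameter is simply appended to the boot command line; a hypothetical elilo.conf fragment (the root= value is made up):

	append="root=/dev/sda2 force_pal_cache_flush"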
@@ -135,6 +135,7 @@ config IA64_GENERIC
	  HP-zx1/sx1000		For HP systems
	  HP-zx1/sx1000+swiotlb	For HP systems with (broken) DMA-constrained devices.
	  SGI-SN2		For SGI Altix systems
	  SGI-UV		For SGI UV systems
	  Ski-simulator		For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>

	  If you don't know what to do, choose "generic".

@@ -170,6 +171,18 @@ config IA64_SGI_SN2
	  to select this option.  If in doubt, select ia64 generic support
	  instead.

config IA64_SGI_UV
	bool "SGI-UV"
	select NUMA
	select ACPI_NUMA
	select SWIOTLB
	help
	  Selecting this option will optimize the kernel for use on UV based
	  systems, but the resulting kernel binary will not run on other
	  types of ia64 systems.  If you have an SGI UV system, it's safe
	  to select this option.  If in doubt, select ia64 generic support
	  instead.

config IA64_HP_SIM
	bool "Ski-simulator"
	select SWIOTLB
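For reference, selecting the new option in a non-generic build would yield roughly this .config fragment (a sketch derived from the select lines above, not taken from the patch):

	CONFIG_IA64_SGI_UV=y
	# pulled in automatically by the selects:
	CONFIG_NUMA=y
	CONFIG_ACPI_NUMA=y
	CONFIG_SWIOTLB=y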
@@ -63,7 +63,7 @@ drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM)	+= arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_GENERIC)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/
drivers-$(CONFIG_IA64_GENERIC)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/ arch/ia64/uv/
drivers-$(CONFIG_OPROFILE)	+= arch/ia64/oprofile/

boot := arch/ia64/hp/sim/boot
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/sched.h>

#include <asm/intrinsics.h>
@@ -29,7 +28,6 @@

extern int die_if_kernel (char *str, struct pt_regs *regs, long err);

struct exec_domain ia32_exec_domain;
struct page *ia32_shared_page[NR_CPUS];
unsigned long *ia32_boot_gdt;
unsigned long *cpu_gdt_table[NR_CPUS];
@@ -240,14 +238,6 @@ ia32_cpu_init (void)
static int __init
ia32_init (void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

#if PAGE_SHIFT > IA32_PAGE_SHIFT
{
	extern struct kmem_cache *ia64_partial_page_cachep;
@@ -117,7 +117,10 @@ acpi_get_sysname(void)
		if (!strcmp(hdr->oem_id, "HP")) {
			return "hpzx1";
		} else if (!strcmp(hdr->oem_id, "SGI")) {
			return "sn2";
			if (!strcmp(hdr->oem_table_id + 4, "UV"))
				return "uv";
			else
				return "sn2";
		}

	return "dig";
@@ -130,6 +133,8 @@ acpi_get_sysname(void)
	return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_SN2)
	return "sn2";
# elif defined (CONFIG_IA64_SGI_UV)
	return "uv";
# elif defined (CONFIG_IA64_DIG)
	return "dig";
# else
@@ -622,6 +627,9 @@ void acpi_unregister_gsi(u32 gsi)
	if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
		return;

	if (has_8259 && gsi < 16)
		return;

	iosapic_unregister_intr(gsi);
}
@@ -1156,6 +1156,9 @@ skip_rbs_switch:
 *	r31 = current->thread_info->flags
 * On exit:
 *	p6 = TRUE if work-pending-check needs to be redone
 *
 * Interrupts are disabled on entry, reenabled depend on work, and
 * disabled on exit.
 */
.work_pending_syscall:
	add r2=-8,r2
@@ -1164,16 +1167,16 @@ skip_rbs_switch:
	st8 [r2]=r8
	st8 [r3]=r10
.work_pending:
	tbit.z p6,p0=r31,TIF_NEED_RESCHED	// current_thread_info()->need_resched==0?
	tbit.z p6,p0=r31,TIF_NEED_RESCHED	// is resched not needed?
(p6)	br.cond.sptk.few .notify
#ifdef CONFIG_PREEMPT
(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
	;;
(pKStk) st4 [r20]=r21
	ssm psr.i		// enable interrupts
#endif
	ssm psr.i		// enable interrupts
	br.call.spnt.many rp=schedule
.ret9:	cmp.eq p6,p0=r0,r0	// p6 <- 1
.ret9:	cmp.eq p6,p0=r0,r0	// p6 <- 1 (re-check)
	rsm psr.i		// disable interrupts
	;;
#ifdef CONFIG_PREEMPT
@@ -1182,13 +1185,13 @@ skip_rbs_switch:
(pKStk)	st4 [r20]=r0		// preempt_count() <- 0
#endif
(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel	// re-check
	br.cond.sptk.many .work_processed_kernel

.notify:
(pUStk)	br.call.spnt.many rp=notify_resume_user
.ret10:	cmp.ne p6,p0=r0,r0	// p6 <- 0
.ret10:	cmp.ne p6,p0=r0,r0	// p6 <- 0 (don't re-check)
(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel	// don't re-check
	br.cond.sptk.many .work_processed_kernel

.work_pending_syscall_end:
	adds r2=PT(R8)+16,r12
@@ -1196,7 +1199,7 @@ skip_rbs_switch:
	;;
	ld8 r8=[r2]
	ld8 r10=[r3]
	br.cond.sptk.many .work_processed_syscall	// re-check
	br.cond.sptk.many .work_processed_syscall

END(ia64_leave_kernel)

@@ -1234,9 +1237,12 @@ GLOBAL_ENTRY(ia64_invoke_schedule_tail)
END(ia64_invoke_schedule_tail)

/*
 * Setup stack and call do_notify_resume_user().  Note that pSys and pNonSys need to
 * be set up by the caller.  We declare 8 input registers so the system call
 * args get preserved, in case we need to restart a system call.
 * Setup stack and call do_notify_resume_user(), keeping interrupts
 * disabled.
 *
 * Note that pSys and pNonSys need to be set up by the caller.
 * We declare 8 input registers so the system call args get preserved,
 * in case we need to restart a system call.
 */
ENTRY(notify_resume_user)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
@@ -900,12 +900,6 @@ static void
palinfo_smp_call(void *info)
{
	palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
	if (data == NULL) {
		printk(KERN_ERR "palinfo: data pointer is NULL\n");
		data->ret = 0;	/* no output */
		return;
	}
	/* does this actual call */
	data->ret = (*data->func)(data->page);
}
@@ -5013,12 +5013,13 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
}

static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);

/*
 * pfm_handle_work() can be called with interrupts enabled
 * (TIF_NEED_RESCHED) or disabled. The down_interruptible
 * call may sleep, therefore we must re-enable interrupts
 * to avoid deadlocks. It is safe to do so because this function
 * is called ONLY when returning to user level (PUStk=1), in which case
 * is called ONLY when returning to user level (pUStk=1), in which case
 * there is no risk of kernel stack overflow due to deep
 * interrupt nesting.
 */
@@ -5034,7 +5035,8 @@ pfm_handle_work(void)

	ctx = PFM_GET_CTX(current);
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
		printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
			task_pid_nr(current));
		return;
	}

@@ -5058,11 +5060,12 @@ pfm_handle_work(void)
	/*
	 * must be done before we check for simple-reset mode
	 */
	if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;

	if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
		goto do_zombie;

	//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
	if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
	if (reason == PFM_TRAP_REASON_RESET)
		goto skip_blocking;

	/*
	 * restore interrupt mask to what it was on entry.
@@ -5110,7 +5113,8 @@ do_zombie:
	/*
	 * in case of interruption of down() we don't restart anything
	 */
	if (ret < 0) goto nothing_to_do;
	if (ret < 0)
		goto nothing_to_do;

skip_blocking:
	pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
@@ -167,11 +167,18 @@ void tsk_clear_notify_resume(struct task_struct *tsk)
	clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME);
}

/*
 * do_notify_resume_user():
 *	Called from notify_resume_user at entry.S, with interrupts disabled.
 */
void
do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall)
do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
{
	if (fsys_mode(current, &scr->pt)) {
		/* defer signal-handling etc. until we return to privilege-level 0.  */
		/*
		 * defer signal-handling etc. until we return to
		 * privilege-level 0.
		 */
		if (!ia64_psr(&scr->pt)->lp)
			ia64_psr(&scr->pt)->lp = 1;
		return;
@@ -179,16 +186,26 @@ do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall

#ifdef CONFIG_PERFMON
	if (current->thread.pfm_needs_checking)
		/*
		 * Note: pfm_handle_work() allow us to call it with interrupts
		 * disabled, and may enable interrupts within the function.
		 */
		pfm_handle_work();
#endif

	/* deal with pending signal delivery */
	if (test_thread_flag(TIF_SIGPENDING))
	if (test_thread_flag(TIF_SIGPENDING)) {
		local_irq_enable();	/* force interrupt enable */
		ia64_do_signal(scr, in_syscall);
	}

	/* copy user rbs to kernel rbs */
	if (unlikely(test_thread_flag(TIF_RESTORE_RSE)))
	if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
		local_irq_enable();	/* force interrupt enable */
		ia64_sync_krbs();
	}

	local_irq_disable();	/* force interrupt disable */
}

static int pal_halt = 1;
@@ -229,6 +229,14 @@ static void __init sal_desc_ap_wakeup(void *p) { }
 */
static int sal_cache_flush_drops_interrupts;

static int __init
force_pal_cache_flush(char *str)
{
	sal_cache_flush_drops_interrupts = 1;
	return 0;
}
early_param("force_pal_cache_flush", force_pal_cache_flush);

void __init
check_sal_cache_flush (void)
{
@@ -237,6 +245,9 @@ check_sal_cache_flush (void)
	u64 vector, cache_type = 3;
	struct ia64_sal_retval isrv;

	if (sal_cache_flush_drops_interrupts)
		return;

	cpu = get_cpu();
	local_irq_save(flags);
@@ -239,6 +239,25 @@ __initcall(register_memory);


#ifdef CONFIG_KEXEC

/*
 * This function checks if the reserved crashkernel is allowed on the specific
 * IA64 machine flavour. Machines without an IO TLB use swiotlb and require
 * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
 * lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that
 * in kdump case. See the comment in sba_init() in sba_iommu.c.
 *
 * So, the only machvec that really supports loading the kdump kernel
 * over 4 GB is "sn2".
 */
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
{
	if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
		return 1;
	else
		return pbase < (1UL << 32);
}

static void __init setup_crashkernel(unsigned long total, int *n)
{
	unsigned long long base = 0, size = 0;
@@ -252,6 +271,16 @@ static void __init setup_crashkernel(unsigned long total, int *n)
		base = kdump_find_rsvd_region(size,
				rsvd_region, *n);
	}

	if (!check_crashkernel_memory(base, size)) {
		pr_warning("crashkernel: There would be kdump memory "
			"at %ld GB but this is unusable because it "
			"must\nbe below 4 GB. Change the memory "
			"configuration of the machine.\n",
			(unsigned long)(base >> 30));
		return;
	}

	if (base != ~0UL) {
		printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			"for crashkernel (System RAM: %ldMB)\n",
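To make the 4 GB rule above concrete, here is a stand-alone sketch of the same predicate with a made-up address (it mirrors check_crashkernel_memory() rather than being part of the patch):

	#include <stdio.h>

	/* Non-sn2/uv machvecs need the crashkernel region to start below 4 GB. */
	static int crashkernel_base_ok(unsigned long pbase, int platform_is_sn2_or_uv)
	{
		if (platform_is_sn2_or_uv)
			return 1;
		return pbase < (1UL << 32);
	}

	int main(void)
	{
		unsigned long base = 5UL << 30;		/* hypothetical base: 5 GB */
		printf("generic machvec: %d, sn2/uv: %d\n",
		       crashkernel_base_ok(base, 0), crashkernel_base_ok(base, 1));
		return 0;
	}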
@@ -719,3 +719,28 @@ out:
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif

/*
 * Even when CONFIG_IA32_SUPPORT is not enabled it is
 * useful to have the Linux/x86 domain registered to
 * avoid an attempted module load when emulators call
 * personality(PER_LINUX32). This saves several milliseconds
 * on each such call.
 */
static struct exec_domain ia32_exec_domain;

static int __init
per_linux32_init(void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

	return 0;
}

__initcall(per_linux32_init);
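For context, the call this hunk speeds up is the userspace personality(2) syscall; a minimal, hypothetical test program (not part of the patch):

	#include <stdio.h>
	#include <sys/personality.h>

	int main(void)
	{
		/* Request the Linux/x86 execution domain. Before this patch an
		 * ia64 kernel built without IA32 support would attempt (and fail)
		 * a personality-module load on every such call; with the domain
		 * registered at boot the call returns immediately. */
		if (personality(PER_LINUX32) == -1)
			perror("personality");

		/* 0xffffffff queries the current personality without changing it. */
		printf("current personality: 0x%x\n", personality(0xffffffff));
		return 0;
	}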

arch/ia64/uv/Makefile (new file, 12 lines)
@@ -0,0 +1,12 @@
# arch/ia64/uv/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License.  See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2008 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn uv subplatform
#

obj-y += kernel/

arch/ia64/uv/kernel/Makefile (new file, 13 lines)
@@ -0,0 +1,13 @@
# arch/ia64/uv/kernel/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License.  See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2008 Silicon Graphics, Inc. All Rights Reserved.
#

EXTRA_CFLAGS += -Iarch/ia64/sn/include

obj-y += setup.o
obj-$(CONFIG_IA64_GENERIC) += machvec.o

arch/ia64/uv/kernel/machvec.c (new file, 11 lines)
@@ -0,0 +1,11 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#define MACHVEC_PLATFORM_NAME		uv
#define MACHVEC_PLATFORM_HEADER		<asm/machvec_uv.h>
#include <asm/machvec_init.h>

arch/ia64/uv/kernel/setup.c (new file, 98 lines)
@@ -0,0 +1,98 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV Core Functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/sn/simulator.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);


struct redir_addr {
	unsigned long redirect;
	unsigned long alias;
};

#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

static __initdata struct redir_addr redir_addrs[] = {
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
};

static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
	union uvh_si_alias0_overlay_config_u alias;
	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
	int i;

	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
		if (alias.s.base == 0) {
			*size = (1UL << alias.s.m_alias);
			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
			return;
		}
	}
	BUG();
}

void __init uv_setup(char **cmdline_p)
{
	union uvh_si_addr_map_config_u m_n_config;
	union uvh_node_id_u node_id;
	unsigned long gnode_upper;
	int nid, cpu, m_val, n_val;
	unsigned long mmr_base, lowmem_redir_base, lowmem_redir_size;

	if (IS_MEDUSA()) {
		lowmem_redir_base = 0;
		lowmem_redir_size = 0;
		node_id.v = 0;
		m_n_config.s.m_skt = 37;
		m_n_config.s.n_skt = 0;
		mmr_base = 0;
	} else {
		get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
		node_id.v = uv_read_local_mmr(UVH_NODE_ID);
		m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
		mmr_base =
			uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
			~UV_MMR_ENABLE;
	}

	m_val = m_n_config.s.m_skt;
	n_val = m_n_config.s.n_skt;
	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

	gnode_upper = (((unsigned long)node_id.s.node_id) &
		       ~((1 << n_val) - 1)) << m_val;

	for_each_present_cpu(cpu) {
		nid = cpu_to_node(cpu);
		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
		uv_cpu_hub_info(cpu)->lowmem_remap_top =
			lowmem_redir_base + lowmem_redir_size;
		uv_cpu_hub_info(cpu)->m_val = m_val;
		uv_cpu_hub_info(cpu)->n_val = m_val;
		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
		uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
		printk(KERN_DEBUG "UV cpu %d, nid %d\n", cpu, nid);
	}
}

@@ -126,6 +126,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
#  include <asm/machvec_hpzx1_swiotlb.h>
# elif defined (CONFIG_IA64_SGI_SN2)
#  include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_SGI_UV)
#  include <asm/machvec_uv.h>
# elif defined (CONFIG_IA64_GENERIC)

# ifdef MACHVEC_PLATFORM_HEADER

include/asm-ia64/machvec_uv.h (new file, 26 lines)
@@ -0,0 +1,26 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV Core Functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef _ASM_IA64_MACHVEC_UV_H
#define _ASM_IA64_MACHVEC_UV_H

extern ia64_mv_setup_t uv_setup;

/*
 * This stuff has dual use!
 *
 * For a generic kernel, the macros are used to initialize the
 * platform's machvec structure.  When compiling a non-generic kernel,
 * the macros are used directly.
 */
#define platform_name		"uv"
#define platform_setup		uv_setup

#endif /* _ASM_IA64_MACHVEC_UV_H */

include/asm-ia64/uv/uv_hub.h (new file, 309 lines)
@@ -0,0 +1,309 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV architectural definitions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef __ASM_IA64_UV_HUB_H__
#define __ASM_IA64_UV_HUB_H__

#include <linux/numa.h>
#include <linux/percpu.h>
#include <asm/types.h>
#include <asm/percpu.h>


/*
 * Addressing Terminology
 *
 *	M	- The low M bits of a physical address represent the offset
 *		  into the blade local memory. RAM memory on a blade is physically
 *		  contiguous (although various IO spaces may punch holes in
 *		  it)..
 *
 *	N	- Number of bits in the node portion of a socket physical
 *		  address.
 *
 *	NASID	- network ID of a router, Mbrick or Cbrick. Nasid values of
 *		  routers always have low bit of 1, C/MBricks have low bit
 *		  equal to 0. Most addressing macros that target UV hub chips
 *		  right shift the NASID by 1 to exclude the always-zero bit.
 *		  NASIDs contain up to 15 bits.
 *
 *	GNODE	- NASID right shifted by 1 bit. Most mmrs contain gnodes instead
 *		  of nasids.
 *
 *	PNODE	- the low N bits of the GNODE. The PNODE is the most useful variant
 *		  of the nasid for socket usage.
 *
 *
 *  NumaLink Global Physical Address Format:
 *  +--------------------------------+---------------------+
 *  |00..000|          GNODE         |      NodeOffset     |
 *  +--------------------------------+---------------------+
 *          |<-------53 - M bits --->|<--------M bits ----->
 *
 *	M - number of node offset bits (35 .. 40)
 *
 *
 *  Memory/UV-HUB Processor Socket Address Format:
 *  +----------------+---------------+---------------------+
 *  |00..000000000000|     PNODE     |      NodeOffset     |
 *  +----------------+---------------+---------------------+
 *                   <--- N bits --->|<--------M bits ----->
 *
 *	M - number of node offset bits (35 .. 40)
 *	N - number of PNODE bits (0 .. 10)
 *
 *		Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
 *		The actual values are configuration dependent and are set at
 *		boot time. M & N values are set by the hardware/BIOS at boot.
 */


/*
 * Maximum number of bricks in all partitions and in all coherency domains.
 * This is the total number of bricks accessible in the numalink fabric. It
 * includes all C & M bricks. Routers are NOT included.
 *
 * This value is also the value of the maximum number of non-router NASIDs
 * in the numalink fabric.
 *
 * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
 */
#define UV_MAX_NUMALINK_BLADES	16384

/*
 * Maximum number of C/Mbricks within a software SSI (hardware may support
 * more).
 */
#define UV_MAX_SSI_BLADES	1

/*
 * The largest possible NASID of a C or M brick (+ 2)
 */
#define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_NODES * 2)

/*
 * The following defines attributes of the HUB chip. These attributes are
 * frequently referenced and are kept in the per-cpu data areas of each cpu.
 * They are kept together in a struct to minimize cache misses.
 */
struct uv_hub_info_s {
	unsigned long	global_mmr_base;
	unsigned long	gpa_mask;
	unsigned long	gnode_upper;
	unsigned long	lowmem_remap_top;
	unsigned long	lowmem_remap_base;
	unsigned short	pnode;
	unsigned short	pnode_mask;
	unsigned short	coherency_domain_number;
	unsigned short	numa_blade_id;
	unsigned char	blade_processor_id;
	unsigned char	m_val;
	unsigned char	n_val;
};
DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define uv_hub_info		(&__get_cpu_var(__uv_hub_info))
#define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))

/*
 * Local & Global MMR space macros.
 *	Note: macros are intended to be used ONLY by inline functions
 *	in this file - not by other kernel code.
 *		n -  NASID (full 15-bit global nasid)
 *		g -  GNODE (full 15-bit global nasid, right shifted 1)
 *		p -  PNODE (local part of nsids, right shifted 1)
 */
#define UV_NASID_TO_PNODE(n)		(((n) >> 1) & uv_hub_info->pnode_mask)
#define UV_PNODE_TO_NASID(p)		(((p) << 1) | uv_hub_info->gnode_upper)

#define UV_LOCAL_MMR_BASE		0xf4000000UL
#define UV_GLOBAL_MMR32_BASE		0xf8000000UL
#define UV_GLOBAL_MMR64_BASE		(uv_hub_info->global_mmr_base)

#define UV_GLOBAL_MMR32_PNODE_SHIFT	15
#define UV_GLOBAL_MMR64_PNODE_SHIFT	26

#define UV_GLOBAL_MMR32_PNODE_BITS(p)	((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))

#define UV_GLOBAL_MMR64_PNODE_BITS(p)					\
	((unsigned long)(p) << UV_GLOBAL_MMR64_PNODE_SHIFT)

/*
 * Macros for converting between kernel virtual addresses, socket local physical
 * addresses, and UV global physical addresses.
 *	Note: use the standard __pa() & __va() macros for converting
 *	      between socket virtual and socket physical addresses.
 */

/* socket phys RAM --> UV global physical address */
static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
{
	if (paddr < uv_hub_info->lowmem_remap_top)
		paddr += uv_hub_info->lowmem_remap_base;
	return paddr | uv_hub_info->gnode_upper;
}


/* socket virtual --> UV global physical address */
static inline unsigned long uv_gpa(void *v)
{
	return __pa(v) | uv_hub_info->gnode_upper;
}

/* socket virtual --> UV global physical address */
static inline void *uv_vgpa(void *v)
{
	return (void *)uv_gpa(v);
}

/* UV global physical address --> socket virtual */
static inline void *uv_va(unsigned long gpa)
{
	return __va(gpa & uv_hub_info->gpa_mask);
}

/* pnode, offset --> socket virtual */
static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
{
	return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
}


/*
 * Access global MMRs using the low memory MMR32 space. This region supports
 * faster MMR access but not all MMRs are accessible in this space.
 */
static inline unsigned long *uv_global_mmr32_address(int pnode,
				unsigned long offset)
{
	return __va(UV_GLOBAL_MMR32_BASE |
		       UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
				 unsigned long val)
{
	*uv_global_mmr32_address(pnode, offset) = val;
}

static inline unsigned long uv_read_global_mmr32(int pnode,
						 unsigned long offset)
{
	return *uv_global_mmr32_address(pnode, offset);
}

/*
 * Access Global MMR space using the MMR space located at the top of physical
 * memory.
 */
static inline unsigned long *uv_global_mmr64_address(int pnode,
				unsigned long offset)
{
	return __va(UV_GLOBAL_MMR64_BASE |
		       UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
				unsigned long val)
{
	*uv_global_mmr64_address(pnode, offset) = val;
}

static inline unsigned long uv_read_global_mmr64(int pnode,
						 unsigned long offset)
{
	return *uv_global_mmr64_address(pnode, offset);
}

/*
 * Access hub local MMRs. Faster than using global space but only local MMRs
 * are accessible.
 */
static inline unsigned long *uv_local_mmr_address(unsigned long offset)
{
	return __va(UV_LOCAL_MMR_BASE | offset);
}

static inline unsigned long uv_read_local_mmr(unsigned long offset)
{
	return *uv_local_mmr_address(offset);
}

static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
{
	*uv_local_mmr_address(offset) = val;
}

/*
 * Structures and definitions for converting between cpu, node, pnode, and blade
 * numbers.
 */

/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
static inline int uv_blade_processor_id(void)
{
	return smp_processor_id();
}

/* Blade number of current cpu. Numnbered 0 .. <#blades -1> */
static inline int uv_numa_blade_id(void)
{
	return 0;
}

/* Convert a cpu number to the the UV blade number */
static inline int uv_cpu_to_blade_id(int cpu)
{
	return 0;
}

/* Convert linux node number to the UV blade number */
static inline int uv_node_to_blade_id(int nid)
{
	return 0;
}

/* Convert a blade id to the PNODE of the blade */
static inline int uv_blade_to_pnode(int bid)
{
	return 0;
}

/* Determine the number of possible cpus on a blade */
static inline int uv_blade_nr_possible_cpus(int bid)
{
	return num_possible_cpus();
}

/* Determine the number of online cpus on a blade */
static inline int uv_blade_nr_online_cpus(int bid)
{
	return num_online_cpus();
}

/* Convert a cpu id to the PNODE of the blade containing the cpu */
static inline int uv_cpu_to_pnode(int cpu)
{
	return 0;
}

/* Convert a linux node number to the PNODE of the blade */
static inline int uv_node_to_pnode(int nid)
{
	return 0;
}

/* Maximum possible number of blades */
static inline int uv_num_possible_blades(void)
{
	return 1;
}

#endif /* __ASM_IA64_UV_HUB__ */
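To illustrate the NASID/GNODE/PNODE arithmetic described in the uv_hub.h comment above, here is a stand-alone sketch with invented values (it mirrors UV_NASID_TO_PNODE and the global-address layout; the numbers are not from real hardware):

	#include <stdio.h>

	int main(void)
	{
		unsigned long nasid = 0x6;		/* C/M-brick NASID: low bit is 0   */
		unsigned long n_val = 10, m_val = 36;	/* hypothetical N and M bit counts */
		unsigned long pnode_mask = (1UL << n_val) - 1;

		unsigned long gnode = nasid >> 1;		/* GNODE = NASID >> 1        */
		unsigned long pnode = gnode & pnode_mask;	/* PNODE = low N bits of it  */
		unsigned long offset = 0x12345;			/* blade-local node offset   */
		unsigned long gpa = (gnode << m_val) | offset;	/* GNODE above the M offset  */

		printf("gnode=%#lx pnode=%#lx gpa=%#lx\n", gnode, pnode, gpa);
		return 0;
	}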

include/asm-ia64/uv/uv_mmrs.h (new file, 266 lines)
@@ -0,0 +1,266 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV MMR definitions
 *
 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef __ASM_IA64_UV_MMRS__
#define __ASM_IA64_UV_MMRS__

/*
 *	AUTO GENERATED - Do not edit
 */

#define UV_MMR_ENABLE		(1UL << 63)

/* ========================================================================= */
/*                                UVH_NODE_ID                                */
/* ========================================================================= */
#define UVH_NODE_ID 0x0UL

#define UVH_NODE_ID_FORCE1_SHFT 0
#define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
#define UVH_NODE_ID_MANUFACTURER_SHFT 1
#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
#define UVH_NODE_ID_PART_NUMBER_SHFT 12
#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
#define UVH_NODE_ID_REVISION_SHFT 28
#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
#define UVH_NODE_ID_NODE_ID_SHFT 32
#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
#define UVH_NODE_ID_NODES_PER_BIT_SHFT 48
#define UVH_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
#define UVH_NODE_ID_NI_PORT_SHFT 56
#define UVH_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL

union uvh_node_id_u {
	unsigned long	v;
	struct uvh_node_id_s {
		unsigned long	force1        :  1;  /* RO */
		unsigned long	manufacturer  : 11;  /* RO */
		unsigned long	part_number   : 16;  /* RO */
		unsigned long	revision      :  4;  /* RO */
		unsigned long	node_id       : 15;  /* RW */
		unsigned long	rsvd_47       :  1;  /*    */
		unsigned long	nodes_per_bit :  7;  /* RW */
		unsigned long	rsvd_55       :  1;  /*    */
		unsigned long	ni_port       :  4;  /* RO */
		unsigned long	rsvd_60_63    :  4;  /*    */
	} s;
};

/* ========================================================================= */
/*                  UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR                */
/* ========================================================================= */
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL

#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL

union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
	unsigned long	v;
	struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
		unsigned long	rsvd_0_23  : 24;  /*    */
		unsigned long	dest_base  : 22;  /* RW */
		unsigned long	rsvd_46_63 : 18;  /*    */
	} s;
};

/* ========================================================================= */
/*                  UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR                */
/* ========================================================================= */
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL

#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL

union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
	unsigned long	v;
	struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
		unsigned long	rsvd_0_23  : 24;  /*    */
		unsigned long	dest_base  : 22;  /* RW */
		unsigned long	rsvd_46_63 : 18;  /*    */
	} s;
};

/* ========================================================================= */
/*                  UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR                */
/* ========================================================================= */
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL

#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL

union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
	unsigned long	v;
	struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
		unsigned long	rsvd_0_23  : 24;  /*    */
		unsigned long	dest_base  : 22;  /* RW */
		unsigned long	rsvd_46_63 : 18;  /*    */
	} s;
};

/* ========================================================================= */
/*                      UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR                    */
/* ========================================================================= */
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL

#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 46
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0000400000000000UL
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL

union uvh_rh_gam_gru_overlay_config_mmr_u {
	unsigned long	v;
	struct uvh_rh_gam_gru_overlay_config_mmr_s {
		unsigned long	rsvd_0_27  : 28;  /*    */
		unsigned long	base       : 18;  /* RW */
		unsigned long	gr4        :  1;  /* RW */
		unsigned long	rsvd_47_51 :  5;  /*    */
		unsigned long	n_gru      :  4;  /* RW */
		unsigned long	rsvd_56_62 :  7;  /*    */
		unsigned long	enable     :  1;  /* RW */
	} s;
};

/* ========================================================================= */
/*                      UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR                    */
/* ========================================================================= */
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL

#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL

union uvh_rh_gam_mmr_overlay_config_mmr_u {
	unsigned long	v;
	struct uvh_rh_gam_mmr_overlay_config_mmr_s {
		unsigned long	rsvd_0_25  : 26;  /*    */
		unsigned long	base       : 20;  /* RW */
		unsigned long	dual_hub   :  1;  /* RW */
		unsigned long	rsvd_47_62 : 16;  /*    */
		unsigned long	enable     :  1;  /* RW */
	} s;
};

/* ========================================================================= */
/*                                  UVH_RTC                                  */
/* ========================================================================= */
#define UVH_RTC 0x28000UL

#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL

union uvh_rtc_u {
	unsigned long	v;
	struct uvh_rtc_s {
		unsigned long	real_time_clock : 56;  /* RW */
		unsigned long	rsvd_56_63      :  8;  /*    */
	} s;
};

/* ========================================================================= */
/*                           UVH_SI_ADDR_MAP_CONFIG                          */
/* ========================================================================= */
#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL

#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL

union uvh_si_addr_map_config_u {
	unsigned long	v;
	struct uvh_si_addr_map_config_s {
		unsigned long	m_skt      :  6;  /* RW */
		unsigned long	rsvd_6_7   :  2;  /*    */
		unsigned long	n_skt      :  4;  /* RW */
		unsigned long	rsvd_12_63 : 52;  /*    */
	} s;
};

/* ========================================================================= */
/*                         UVH_SI_ALIAS0_OVERLAY_CONFIG                      */
/* ========================================================================= */
#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL

#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL

union uvh_si_alias0_overlay_config_u {
	unsigned long	v;
	struct uvh_si_alias0_overlay_config_s {
		unsigned long	rsvd_0_23  : 24;  /*    */
		unsigned long	base       :  8;  /* RW */
		unsigned long	rsvd_32_47 : 16;  /*    */
		unsigned long	m_alias    :  5;  /* RW */
		unsigned long	rsvd_53_62 : 10;  /*    */
		unsigned long	enable     :  1;  /* RW */
	} s;
};

/* ========================================================================= */
/*                         UVH_SI_ALIAS1_OVERLAY_CONFIG                      */
/* ========================================================================= */
#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL

#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL

union uvh_si_alias1_overlay_config_u {
	unsigned long	v;
	struct uvh_si_alias1_overlay_config_s {
		unsigned long	rsvd_0_23  : 24;  /*    */
		unsigned long	base       :  8;  /* RW */
		unsigned long	rsvd_32_47 : 16;  /*    */
		unsigned long	m_alias    :  5;  /* RW */
		unsigned long	rsvd_53_62 : 10;  /*    */
		unsigned long	enable     :  1;  /* RW */
	} s;
};

/* ========================================================================= */
/*                         UVH_SI_ALIAS2_OVERLAY_CONFIG                      */
/* ========================================================================= */
#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL

#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL

union uvh_si_alias2_overlay_config_u {
	unsigned long	v;
	struct uvh_si_alias2_overlay_config_s {
		unsigned long	rsvd_0_23  : 24;  /*    */
		unsigned long	base       :  8;  /* RW */
		unsigned long	rsvd_32_47 : 16;  /*    */
		unsigned long	m_alias    :  5;  /* RW */
		unsigned long	rsvd_53_62 : 10;  /*    */
		unsigned long	enable     :  1;  /* RW */
	} s;
};


#endif /* __ASM_IA64_UV_MMRS__ */
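As a usage note, the bit-field unions above are read by copying a raw MMR value into the .v member and then picking fields out of .s, exactly as uv_setup() does; a small hypothetical helper (not part of the patch) built on the accessors from uv_hub.h:

	#include <asm/uv/uv_mmrs.h>
	#include <asm/uv/uv_hub.h>

	/* Sketch: return the 15-bit NODE_ID field of the local hub. */
	static inline int example_read_node_id(void)
	{
		union uvh_node_id_u node_id;

		node_id.v = uv_read_local_mmr(UVH_NODE_ID);	/* raw 64-bit MMR */
		return node_id.s.node_id;			/* bits 32..46    */
	}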