
asm-generic updates for v6.7

The ia64 architecture gets its well-earned retirement as planned,
 now that there is one last (mostly) working release that will
 be maintained as an LTS kernel.
 
 The architecture specific system call tables are updated for
 the added map_shadow_stack() syscall and to remove references
 to the long-gone sys_lookup_dcookie() syscall.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEiK/NIGsWEZVxh/FrYKtH/8kJUicFAmVC40IACgkQYKtH/8kJ
 Uidhmw/9EX+aWSXGoObJ3fngaNSMw+PmrEuP8qEKBHxfKHcCdX3hc451Oh4GlhaQ
 tru91pPwgNvN2/rfoKusxT+V4PemGIzfNni/04rp+P0kvmdw5otQ2yNhsQNsfVmq
 XGWvkxF4P2GO6bkjjfR/1dDq7GtlyXtwwPDKeLbYb6TnJOZjtx+EAN27kkfSn1Ms
 R4Sa3zJ+DfHUmHL5S9g+7UD/CZ5GfKNmIskI4Mz5GsfoUz/0iiU+Bge/9sdcdSJQ
 kmbLy5YnVzfooLZ3TQmBFsO3iAMWb0s/mDdtyhqhTVmTUshLolkPYyKnPFvdupyv
 shXcpEST2XJNeaDRnL2K4zSCdxdbnCZHDpjfl9wfioBg7I8NfhXKpf1jYZHH1de4
 LXq8ndEFEOVQw/zSpYWfQq1sux8Jiqr+UK/ukbVeFWiGGIUs91gEWtPAf8T0AZo9
 ujkJvaWGl98O1g5wmBu0/dAR6QcFJMDfVwbmlIFpU8O+MEaz6X8mM+O5/T0IyTcD
 eMbAUjj4uYcU7ihKzHEv/0SS9Of38kzff67CLN5k8wOP/9NlaGZ78o1bVle9b52A
 BdhrsAefFiWHp1jT6Y9Rg4HOO/TguQ9e6EWSKOYFulsiLH9LEFaB9RwZLeLytV0W
 vlAgY9rUW77g1OJcb7DoNv33nRFuxsKqsnz3DEIXtgozo9CzbYI=
 =H1vH
 -----END PGP SIGNATURE-----

Merge tag 'asm-generic-6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic

Pull ia64 removal and asm-generic updates from Arnd Bergmann:

 - The ia64 architecture gets its well-earned retirement as planned,
   now that there is one last (mostly) working release that will be
   maintained as an LTS kernel.

 - The architecture specific system call tables are updated for the
   added map_shadow_stack() syscall and to remove references to the
   long-gone sys_lookup_dcookie() syscall.

* tag 'asm-generic-6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
  hexagon: Remove unusable symbols from the ptrace.h uapi
  asm-generic: Fix spelling of architecture
  arch: Reserve map_shadow_stack() syscall number for all architectures
  syscalls: Cleanup references to sys_lookup_dcookie()
  Documentation: Drop or replace remaining mentions of IA64
  lib/raid6: Drop IA64 support
  Documentation: Drop IA64 from feature descriptions
  kernel: Drop IA64 support from sig_fault handlers
  arch: Remove Itanium (IA-64) architecture
Linus Torvalds 2023-11-01 15:28:33 -10:00
commit 1e0c505e13
453 changed files with 142 additions and 65219 deletions
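The merge message above mentions the newly reserved map_shadow_stack() syscall. As a rough, non-authoritative illustration, the sketch below invokes it directly via syscall(2); the generic syscall number 453 matches the number reserved in this series, while the SHADOW_STACK_SET_TOKEN flag and the chosen size are assumptions to be checked against the architecture's uapi headers::

/*
 * Sketch only: allocate a shadow stack with the newly reserved syscall.
 * __NR_map_shadow_stack 453 is the generic number reserved in this series;
 * SHADOW_STACK_SET_TOKEN is assumed from the x86 uapi headers.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_map_shadow_stack
#define __NR_map_shadow_stack 453
#endif
#ifndef SHADOW_STACK_SET_TOKEN
#define SHADOW_STACK_SET_TOKEN 0x1
#endif

int main(void)
{
	long addr = syscall(__NR_map_shadow_stack, 0UL, 4UL * 4096,
			    SHADOW_STACK_SET_TOKEN);

	if (addr == -1)
		printf("map_shadow_stack: %s\n", strerror(errno));
	else
		printf("shadow stack at %#lx\n", (unsigned long)addr);
	return 0;
}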


@ -354,9 +354,6 @@ Description: Parameters for the CPU cache attributes
- ReadWriteAllocate:
both writeallocate and readallocate
attributes:
LEGACY used only on IA64 and is same as write_policy
coherency_line_size:
the minimum amount of data in bytes that gets
transferred from memory to cache


@ -2,7 +2,7 @@ What: /sys/firmware/dmi/entries/
Date: February 2011
Contact: Mike Waychison <mikew@google.com>
Description:
Many machines' firmware (x86 and ia64) export DMI /
Many machines' firmware (x86 and arm64) export DMI /
SMBIOS tables to the operating system. Getting at this
information is often valuable to userland, especially in
cases where there are OEM extensions used.


@ -17,7 +17,7 @@ You can use common commands, such as cp, scp or makedumpfile to copy
the memory image to a dump file on the local disk, or across the network
to a remote system.
Kdump and kexec are currently supported on the x86, x86_64, ppc64, ia64,
Kdump and kexec are currently supported on the x86, x86_64, ppc64,
s390x, arm and arm64 architectures.
When the system kernel boots, it reserves a small section of memory for
@ -113,7 +113,7 @@ There are two possible methods of using Kdump.
2) Or use the system kernel binary itself as dump-capture kernel and there is
no need to build a separate dump-capture kernel. This is possible
only with the architectures which support a relocatable kernel. As
of today, i386, x86_64, ppc64, ia64, arm and arm64 architectures support
of today, i386, x86_64, ppc64, arm and arm64 architectures support
relocatable kernel.
Building a relocatable kernel is advantageous from the point of view that
@ -236,24 +236,6 @@ Dump-capture kernel config options (Arch Dependent, ppc64)
Make and install the kernel and its modules.
Dump-capture kernel config options (Arch Dependent, ia64)
----------------------------------------------------------
- No specific options are required to create a dump-capture kernel
for ia64, other than those specified in the arch independent section
above. This means that it is possible to use the system kernel
as a dump-capture kernel if desired.
The crashkernel region can be automatically placed by the system
kernel at runtime. This is done by specifying the base address as 0,
or omitting it altogether::
crashkernel=256M@0
or::
crashkernel=256M
Dump-capture kernel config options (Arch Dependent, arm)
----------------------------------------------------------
@ -348,11 +330,6 @@ Boot into System Kernel
On ppc64, use "crashkernel=128M@32M".
On ia64, 256M@256M is a generous value that typically works.
The region may be automatically placed on ia64, see the
dump-capture kernel config option notes above.
If sparse memory is used, the size should be rounded to GRANULE boundaries.
On s390x, typically use "crashkernel=xxM". The value of xx is dependent
on the memory consumption of the kdump system. In general this is not
dependent on the memory size of the production system.
@ -383,10 +360,6 @@ For ppc64:
- Use vmlinux
For ia64:
- Use vmlinux or vmlinuz.gz
For s390x:
- Use image or bzImage
@ -428,14 +401,10 @@ to load dump-capture kernel::
--initrd=<initrd-for-dump-capture-kernel> \
--append="root=<root-dev> <arch-specific-options>"
Please note, that --args-linux does not need to be specified for ia64.
It is planned to make this a no-op on that architecture, but for now
it should be omitted
Following are the arch specific command line options to be used while
loading dump-capture kernel.
For i386, x86_64 and ia64:
For i386 and x86_64:
"1 irqpoll nr_cpus=1 reset_devices"


@ -413,36 +413,6 @@ of a higher page table lookup overhead, and also consumes more page
table space per process. Used to check whether PAE was enabled in the
crash kernel when converting virtual addresses to physical addresses.
ia64
====
pgdat_list|(pgdat_list, MAX_NUMNODES)
-------------------------------------
pg_data_t array storing all NUMA nodes information. MAX_NUMNODES
indicates the number of the nodes.
node_memblk|(node_memblk, NR_NODE_MEMBLKS)
------------------------------------------
List of node memory chunks. Filled when parsing the SRAT table to obtain
information about memory nodes. NR_NODE_MEMBLKS indicates the number of
node memory chunks.
These values are used to compute the number of nodes the crashed kernel used.
node_memblk_s|(node_memblk_s, start_paddr)|(node_memblk_s, size)
----------------------------------------------------------------
The size of a struct node_memblk_s and the offsets of the
node_memblk_s's members. Used to compute the number of nodes.
PGTABLE_3|PGTABLE_4
-------------------
User-space tools need to know whether the crash kernel was in 3-level or
4-level paging mode. Used to distinguish the page table.
ARM64
=====


@ -1453,7 +1453,7 @@
See comment before function elanfreq_setup() in
arch/x86/kernel/cpu/cpufreq/elanfreq.c.
elfcorehdr=[size[KMG]@]offset[KMG] [IA64,PPC,SH,X86,S390]
elfcorehdr=[size[KMG]@]offset[KMG] [PPC,SH,X86,S390]
Specifies physical address of start of kernel core
image elf header and optionally the size. Generally
kexec loader will pass this option to capture kernel.
@ -1516,12 +1516,6 @@
floppy= [HW]
See Documentation/admin-guide/blockdev/floppy.rst.
force_pal_cache_flush
[IA-64] Avoid check_sal_cache_flush which may hang on
buggy SAL_CACHE_FLUSH implementations. Using this
parameter will force ia64_sal_cache_flush to call
ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.
forcepae [X86-32]
Forcefully enable Physical Address Extension (PAE).
Many Pentium M systems disable PAE but may have a


@ -33,7 +33,7 @@ used to expose persistent memory, other performance-differentiated memory and
reserved memory regions as ordinary system RAM to Linux.
Linux only supports memory hot(un)plug on selected 64 bit architectures, such as
x86_64, arm64, ppc64, s390x and ia64.
x86_64, arm64, ppc64 and s390x.
Memory Hot(Un)Plug Granularity
------------------------------


@ -436,7 +436,7 @@ ignore-unaligned-usertrap
On architectures where unaligned accesses cause traps, and where this
feature is supported (``CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN``;
currently, ``arc``, ``ia64`` and ``loongarch``), controls whether all
currently, ``arc`` and ``loongarch``), controls whether all
unaligned traps are logged.
= =============================================================
@ -445,10 +445,7 @@ unaligned traps are logged.
setting.
= =============================================================
See also `unaligned-trap`_ and `unaligned-dump-stack`_. On ``ia64``,
this allows system administrators to override the
``IA64_THREAD_UAC_NOPRINT`` ``prctl`` and avoid logs being flooded.
See also `unaligned-trap`_.
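For completeness, a minimal sketch of flipping this knob from a program rather than with sysctl(8) or a shell redirect; the value semantics follow the table above (partially elided in this hunk), and 1 is used purely as an example::

/* Sketch: write the ignore-unaligned-usertrap sysctl directly. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/ignore-unaligned-usertrap", "w");

	if (!f) {
		perror("ignore-unaligned-usertrap");
		return 1;
	}
	fputs("1\n", f);	/* example value only; see the table above */
	fclose(f);
	return 0;
}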
io_uring_disabled
=================
@ -1539,22 +1536,6 @@ See Documentation/admin-guide/kernel-parameters.rst and
Documentation/trace/boottime-trace.rst.
.. _unaligned-dump-stack:
unaligned-dump-stack (ia64)
===========================
When logging unaligned accesses, controls whether the stack is
dumped.
= ===================================================
0 Do not dump the stack. This is the default setting.
1 Dump the stack.
= ===================================================
See also `ignore-unaligned-usertrap`_.
unaligned-trap
==============


@ -1,246 +0,0 @@
==================================
Memory Attribute Aliasing on IA-64
==================================
Bjorn Helgaas <bjorn.helgaas@hp.com>
May 4, 2006
Memory Attributes
=================
Itanium supports several attributes for virtual memory references.
The attribute is part of the virtual translation, i.e., it is
contained in the TLB entry. The ones of most interest to the Linux
kernel are:
== ======================
WB Write-back (cacheable)
UC Uncacheable
WC Write-coalescing
== ======================
System memory typically uses the WB attribute. The UC attribute is
used for memory-mapped I/O devices. The WC attribute is uncacheable
like UC is, but writes may be delayed and combined to increase
performance for things like frame buffers.
The Itanium architecture requires that we avoid accessing the same
page with both a cacheable mapping and an uncacheable mapping[1].
The design of the chipset determines which attributes are supported
on which regions of the address space. For example, some chipsets
support either WB or UC access to main memory, while others support
only WB access.
Memory Map
==========
Platform firmware describes the physical memory map and the
supported attributes for each region. At boot-time, the kernel uses
the EFI GetMemoryMap() interface. ACPI can also describe memory
devices and the attributes they support, but Linux/ia64 currently
doesn't use this information.
The kernel uses the efi_memmap table returned from GetMemoryMap() to
learn the attributes supported by each region of physical address
space. Unfortunately, this table does not completely describe the
address space because some machines omit some or all of the MMIO
regions from the map.
The kernel maintains another table, kern_memmap, which describes the
memory Linux is actually using and the attribute for each region.
This contains only system memory; it does not contain MMIO space.
The kern_memmap table typically contains only a subset of the system
memory described by the efi_memmap. Linux/ia64 can't use all memory
in the system because of constraints imposed by the identity mapping
scheme.
The efi_memmap table is preserved unmodified because the original
boot-time information is required for kexec.
Kernel Identity Mappings
========================
Linux/ia64 identity mappings are done with large pages, currently
either 16MB or 64MB, referred to as "granules." Cacheable mappings
are speculative[2], so the processor can read any location in the
page at any time, independent of the programmer's intentions. This
means that to avoid attribute aliasing, Linux can create a cacheable
identity mapping only when the entire granule supports cacheable
access.
Therefore, kern_memmap contains only full granule-sized regions that
can be referenced safely by an identity mapping.
Uncacheable mappings are not speculative, so the processor will
generate UC accesses only to locations explicitly referenced by
software. This allows UC identity mappings to cover granules that
are only partially populated, or populated with a combination of UC
and WB regions.
User Mappings
=============
User mappings are typically done with 16K or 64K pages. The smaller
page size allows more flexibility because only 16K or 64K has to be
homogeneous with respect to memory attributes.
Potential Attribute Aliasing Cases
==================================
There are several ways the kernel creates new mappings:
mmap of /dev/mem
----------------
This uses remap_pfn_range(), which creates user mappings. These
mappings may be either WB or UC. If the region being mapped
happens to be in kern_memmap, meaning that it may also be mapped
by a kernel identity mapping, the user mapping must use the same
attribute as the kernel mapping.
If the region is not in kern_memmap, the user mapping should use
an attribute reported as being supported in the EFI memory map.
Since the EFI memory map does not describe MMIO on some
machines, this should use an uncacheable mapping as a fallback.
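A minimal user-space sketch of the /dev/mem mmap path discussed above. The physical address is a placeholder, and O_SYNC (often used to request an uncached mapping) is an assumption for illustration; the kernel ultimately chooses WB or UC according to kern_memmap and the EFI memory map::

/* Sketch: map one page of /dev/mem at a placeholder physical address. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define PHYS_ADDR 0x80000000UL	/* hypothetical address, for illustration */

int main(void)
{
	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	void *p;

	if (fd < 0) {
		perror("/dev/mem");
		return 1;
	}
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, PHYS_ADDR);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	/* ... access the mapping here ... */
	munmap(p, 4096);
	close(fd);
	return 0;
}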
mmap of /sys/class/pci_bus/.../legacy_mem
-----------------------------------------
This is very similar to mmap of /dev/mem, except that legacy_mem
only allows mmap of the one megabyte "legacy MMIO" area for a
specific PCI bus. Typically this is the first megabyte of
physical address space, but it may be different on machines with
several VGA devices.
"X" uses this to access VGA frame buffers. Using legacy_mem
rather than /dev/mem allows multiple instances of X to talk to
different VGA cards.
The /dev/mem mmap constraints apply.
mmap of /proc/bus/pci/.../??.?
------------------------------
This is an MMIO mmap of PCI functions, which additionally may or
may not be requested as using the WC attribute.
If WC is requested, and the region in kern_memmap is either WC
or UC, and the EFI memory map designates the region as WC, then
the WC mapping is allowed.
Otherwise, the user mapping must use the same attribute as the
kernel mapping.
read/write of /dev/mem
----------------------
This uses copy_from_user(), which implicitly uses a kernel
identity mapping. This is obviously safe for things in
kern_memmap.
There may be corner cases of things that are not in kern_memmap,
but could be accessed this way. For example, registers in MMIO
space are not in kern_memmap, but could be accessed with a UC
mapping. This would not cause attribute aliasing. But
registers typically can be accessed only with four-byte or
eight-byte accesses, and the copy_from_user() path doesn't allow
any control over the access size, so this would be dangerous.
ioremap()
---------
This returns a mapping for use inside the kernel.
If the region is in kern_memmap, we should use the attribute
specified there.
If the EFI memory map reports that the entire granule supports
WB, we should use that (granules that are partially reserved
or occupied by firmware do not appear in kern_memmap).
If the granule contains non-WB memory, but we can cover the
region safely with kernel page table mappings, we can use
ioremap_page_range() as most other architectures do.
Failing all of the above, we have to fall back to a UC mapping.
Past Problem Cases
==================
mmap of various MMIO regions from /dev/mem by "X" on Intel platforms
--------------------------------------------------------------------
The EFI memory map may not report these MMIO regions.
These must be allowed so that X will work. This means that
when the EFI memory map is incomplete, every /dev/mem mmap must
succeed. It may create either WB or UC user mappings, depending
on whether the region is in kern_memmap or the EFI memory map.
mmap of 0x0-0x9FFFF /dev/mem by "hwinfo" on HP sx1000 with VGA enabled
----------------------------------------------------------------------
The EFI memory map reports the following attributes:
=============== ======= ==================
0x00000-0x9FFFF WB only
0xA0000-0xBFFFF UC only (VGA frame buffer)
0xC0000-0xFFFFF WB only
=============== ======= ==================
This mmap is done with user pages, not kernel identity mappings,
so it is safe to use WB mappings.
The kernel VGA driver may ioremap the VGA frame buffer at 0xA0000,
which uses a granule-sized UC mapping. This granule will cover some
WB-only memory, but since UC is non-speculative, the processor will
never generate an uncacheable reference to the WB-only areas unless
the driver explicitly touches them.
mmap of 0x0-0xFFFFF legacy_mem by "X"
-------------------------------------
If the EFI memory map reports that the entire range supports the
same attributes, we can allow the mmap (and we will prefer WB if
supported, as is the case with HP sx[12]000 machines with VGA
disabled).
If EFI reports the range as partly WB and partly UC (as on sx[12]000
machines with VGA enabled), we must fail the mmap because there's no
safe attribute to use.
If EFI reports some of the range but not all (as on Intel firmware
that doesn't report the VGA frame buffer at all), we should fail the
mmap and force the user to map just the specific region of interest.
mmap of 0xA0000-0xBFFFF legacy_mem by "X" on HP sx1000 with VGA disabled
------------------------------------------------------------------------
The EFI memory map reports the following attributes::
0x00000-0xFFFFF WB only (no VGA MMIO hole)
This is a special case of the previous case, and the mmap should
fail for the same reason as above.
read of /sys/devices/.../rom
----------------------------
For VGA devices, this may cause an ioremap() of 0xC0000. This
used to be done with a UC mapping, because the VGA frame buffer
at 0xA0000 prevents use of a WB granule. The UC mapping causes
an MCA on HP sx[12]000 chipsets.
We should use WB page table mappings to avoid covering the VGA
frame buffer.
Notes
=====
[1] SDM rev 2.2, vol 2, sec 4.4.1.
[2] SDM rev 2.2, vol 2, sec 4.4.6.


@ -1,144 +0,0 @@
==========================
EFI Real Time Clock driver
==========================
S. Eranian <eranian@hpl.hp.com>
March 2000
1. Introduction
===============
This document describes the efirtc.c driver as provided for
the IA-64 platform.
The purpose of this driver is to supply an API for kernel and user applications
to get access to the Time Service offered by EFI version 0.92.
EFI provides 4 calls one can make once the OS is booted: GetTime(),
SetTime(), GetWakeupTime(), SetWakeupTime() which are all supported by this
driver. We describe those calls as well as the design of the driver in the
following sections.
2. Design Decisions
===================
The original idea was to provide a very simple driver to get access to,
at first, the time of day service. This is required in order to access, in a
portable way, the CMOS clock. A program like /sbin/hwclock uses such a clock
to initialize the system view of the time during boot.
Because we wanted to minimize the impact on existing user-level apps using
the CMOS clock, we decided to expose an API that was very similar to the one
used today with the legacy RTC driver (driver/char/rtc.c). However, because
EFI provides simpler services, not all ioctl()s are available. Also
new ioctl()s have been introduced for things that EFI provides but not the
legacy.
EFI uses a slightly different way of representing the time; notably,
the reference date is different and the year uses the full 4-digit format.
The Epoch is January 1st 1998. For backward compatibility reasons we don't
expose this new way of representing time. Instead we use something very
similar to the struct tm, i.e. struct rtc_time, as used by hwclock.
One of the reasons for doing it this way is to allow for EFI to still evolve
without necessarily impacting any of the user applications. The decoupling
enables flexibility and permits writing wrapper code in case things change.
The driver exposes two interfaces, one via the device file and a set of
ioctl()s. The other is read-only via the /proc filesystem.
As of today we don't offer a /proc/sys interface.
To allow for a uniform interface between the legacy RTC and EFI time service,
we have created the include/linux/rtc.h header file to contain only the
"public" API of the two drivers. The specifics of the legacy RTC are still
in include/linux/mc146818rtc.h.
3. Time of day service
======================
This part of the driver gives access to the time of day service of EFI.
It provides two ioctl()s, compatible with the legacy RTC calls:
Read the CMOS clock::
ioctl(d, RTC_RD_TIME, &rtc);
Write the CMOS clock::
ioctl(d, RTC_SET_TIME, &rtc);
The rtc is a pointer to a data structure defined in rtc.h which is close
to a struct tm::
struct rtc_time {
int tm_sec;
int tm_min;
int tm_hour;
int tm_mday;
int tm_mon;
int tm_year;
int tm_wday;
int tm_yday;
int tm_isdst;
};
The driver takes care of converting back and forth between the EFI time and
this format.
Those two ioctl()s can be exercised with the hwclock command:
For reading::
# /sbin/hwclock --show
Mon Mar 6 15:32:32 2000 -0.910248 seconds
For setting::
# /sbin/hwclock --systohc
Root privileges are required to be able to set the time of day.
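Besides hwclock, the read ioctl can be exercised directly from a small program. A minimal sketch, assuming the driver is reachable as /dev/rtc (the node name may differ per system)::

/* Sketch: read the clock through the legacy-compatible RTC_RD_TIME ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time rtc;
	int fd = open("/dev/rtc", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_RD_TIME, &rtc) < 0) {
		perror("RTC_RD_TIME");
		return 1;
	}
	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       rtc.tm_year + 1900, rtc.tm_mon + 1, rtc.tm_mday,
	       rtc.tm_hour, rtc.tm_min, rtc.tm_sec);
	close(fd);
	return 0;
}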
4. Wakeup Alarm service
=======================
EFI provides an API by which one can program when a machine should wakeup,
i.e. reboot. This is very different from the alarm provided by the legacy
RTC which is some kind of interval timer alarm. For this reason we don't use
the same ioctl()s to get access to the service. Instead we have
introduced 2 new ioctl()s to the interface of an RTC.
We have added 2 new ioctl()s that are specific to the EFI driver:
Read the current state of the alarm::
ioctl(d, RTC_WKALM_RD, &wkt)
Set the alarm or change its status::
ioctl(d, RTC_WKALM_SET, &wkt)
The wkt structure encapsulates a struct rtc_time + 2 extra fields to get
status information::
struct rtc_wkalrm {
unsigned char enabled; /* =1 if alarm is enabled */
unsigned char pending; /* =1 if alarm is pending */
struct rtc_time time;
}
As of today, none of the existing user-level apps supports this feature.
However, writing such a program should not be hard, simply using those two
ioctl()s.
Root privileges are required to be able to set the alarm.
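A similar sketch for the wakeup alarm ioctls described above, again assuming /dev/rtc as the device node::

/* Sketch: query the wakeup alarm state via RTC_WKALM_RD. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_wkalrm wkt;
	int fd = open("/dev/rtc", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_WKALM_RD, &wkt) < 0) {
		perror("RTC_WKALM_RD");
		return 1;
	}
	printf("enabled=%d pending=%d alarm=%02d:%02d:%02d\n",
	       wkt.enabled, wkt.pending,
	       wkt.time.tm_hour, wkt.time.tm_min, wkt.time.tm_sec);
	close(fd);
	return 0;
}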
5. References
=============
Checkout the following Web site for more information on EFI:
http://developer.intel.com/technology/efi/

File diff suppressed because it is too large


@ -1,3 +0,0 @@
.. SPDX-License-Identifier: GPL-2.0
.. kernel-feat:: $srctree/Documentation/features ia64


@ -1,303 +0,0 @@
===================================
Light-weight System Calls for IA-64
===================================
Started: 13-Jan-2003
Last update: 27-Sep-2003
David Mosberger-Tang
<davidm@hpl.hp.com>
Using the "epc" instruction effectively introduces a new mode of
execution to the ia64 linux kernel. We call this mode the
"fsys-mode". To recap, the normal states of execution are:
- kernel mode:
Both the register stack and the memory stack have been
switched over to kernel memory. The user-level state is saved
in a pt-regs structure at the top of the kernel memory stack.
- user mode:
Both the register stack and the kernel stack are in
user memory. The user-level state is contained in the
CPU registers.
- bank 0 interruption-handling mode:
This is the non-interruptible state which all
interruption-handlers start execution in. The user-level
state remains in the CPU registers and some kernel state may
be stored in bank 0 of registers r16-r31.
In contrast, fsys-mode has the following special properties:
- execution is at privilege level 0 (most-privileged)
- CPU registers may contain a mixture of user-level and kernel-level
state (it is the responsibility of the kernel to ensure that no
security-sensitive kernel-level state is leaked back to
user-level)
- execution is interruptible and preemptible (an fsys-mode handler
can disable interrupts and avoid all other interruption-sources
to avoid preemption)
- neither the memory-stack nor the register-stack can be trusted while
in fsys-mode (they point to the user-level stacks, which may
be invalid, or completely bogus addresses)
In summary, fsys-mode is much more similar to running in user-mode
than it is to running in kernel-mode. Of course, given that the
privilege level is at level 0, this means that fsys-mode requires some
care (see below).
How to tell fsys-mode
=====================
Linux operates in fsys-mode when (a) the privilege level is 0 (most
privileged) and (b) the stacks have NOT been switched to kernel memory
yet. For convenience, the header file <asm-ia64/ptrace.h> provides
three macros::
user_mode(regs)
user_stack(task,regs)
fsys_mode(task,regs)
The "regs" argument is a pointer to a pt_regs structure. The "task"
argument is a pointer to the task structure to which the "regs"
pointer belongs to. user_mode() returns TRUE if the CPU state pointed
to by "regs" was executing in user mode (privilege level 3).
user_stack() returns TRUE if the state pointed to by "regs" was
executing on the user-level stack(s). Finally, fsys_mode() returns
TRUE if the CPU state pointed to by "regs" was executing in fsys-mode.
The fsys_mode() macro is equivalent to the expression::
!user_mode(regs) && user_stack(task,regs)
How to write an fsyscall handler
================================
The file arch/ia64/kernel/fsys.S contains a table of fsyscall-handlers
(fsyscall_table). This table contains one entry for each system call.
By default, a system call is handled by fsys_fallback_syscall(). This
routine takes care of entering (full) kernel mode and calling the
normal Linux system call handler. For performance-critical system
calls, it is possible to write a hand-tuned fsyscall_handler. For
example, fsys.S contains fsys_getpid(), which is a hand-tuned version
of the getpid() system call.
The entry and exit-state of an fsyscall handler is as follows:
Machine state on entry to fsyscall handler
------------------------------------------
========= ===============================================================
r10 0
r11 saved ar.pfs (a user-level value)
r15 system call number
r16 "current" task pointer (in normal kernel-mode, this is in r13)
r32-r39 system call arguments
b6 return address (a user-level value)
ar.pfs previous frame-state (a user-level value)
PSR.be cleared to zero (i.e., little-endian byte order is in effect)
- all other registers may contain values passed in from user-mode
========= ===============================================================
Required machine state on exit to fsyscall handler
--------------------------------------------------
========= ===========================================================
r11 saved ar.pfs (as passed into the fsyscall handler)
r15 system call number (as passed into the fsyscall handler)
r32-r39 system call arguments (as passed into the fsyscall handler)
b6 return address (as passed into the fsyscall handler)
ar.pfs previous frame-state (as passed into the fsyscall handler)
========= ===========================================================
Fsyscall handlers can execute with very little overhead, but with that
speed comes a set of restrictions:
* Fsyscall-handlers MUST check for any pending work in the flags
member of the thread-info structure and if any of the
TIF_ALLWORK_MASK flags are set, the handler needs to fall back on
doing a full system call (by calling fsys_fallback_syscall).
* Fsyscall-handlers MUST preserve incoming arguments (r32-r39, r11,
r15, b6, and ar.pfs) because they will be needed in case of a
system call restart. Of course, all "preserved" registers also
must be preserved, in accordance with the normal calling conventions.
* Fsyscall-handlers MUST check argument registers for containing a
NaT value before using them in any way that could trigger a
NaT-consumption fault. If a system call argument is found to
contain a NaT value, an fsyscall-handler may return immediately
with r8=EINVAL, r10=-1.
* Fsyscall-handlers MUST NOT use the "alloc" instruction or perform
any other operation that would trigger mandatory RSE
(register-stack engine) traffic.
* Fsyscall-handlers MUST NOT write to any stacked registers because
it is not safe to assume that user-level called a handler with the
proper number of arguments.
* Fsyscall-handlers need to be careful when accessing per-CPU variables:
unless proper safe-guards are taken (e.g., interruptions are avoided),
execution may be pre-empted and resumed on another CPU at any given
time.
* Fsyscall-handlers must be careful not to leak sensitive kernel
information back to user-level. In particular, before returning to
user-level, care needs to be taken to clear any scratch registers
that could contain sensitive information (note that the current
task pointer is not considered sensitive: it's already exposed
through ar.k6).
* Fsyscall-handlers MUST NOT access user-memory without first
validating access-permission (this can be done typically via
probe.r.fault and/or probe.w.fault) and without guarding against
memory access exceptions (this can be done with the EX() macros
defined by asmmacro.h).
The above restrictions may seem draconian, but remember that it's
possible to trade off some of the restrictions by paying a slightly
higher overhead. For example, if an fsyscall-handler could benefit
from the shadow register bank, it could temporarily disable PSR.i and
PSR.ic, switch to bank 0 (bsw.0) and then use the shadow registers as
needed. In other words, following the above rules yields extremely
fast system call execution (while fully preserving system call
semantics), but there is also a lot of flexibility in handling more
complicated cases.
Signal handling
===============
The delivery of (asynchronous) signals must be delayed until fsys-mode
is exited. This is accomplished with the help of the lower-privilege
transfer trap: arch/ia64/kernel/process.c:do_notify_resume_user()
checks whether the interrupted task was in fsys-mode and, if so, sets
PSR.lp and returns immediately. When fsys-mode is exited via the
"br.ret" instruction that lowers the privilege level, a trap will
occur. The trap handler clears PSR.lp again and returns immediately.
The kernel exit path then checks for and delivers any pending signals.
PSR Handling
============
The "epc" instruction doesn't change the contents of PSR at all. This
is in contrast to a regular interruption, which clears almost all
bits. Because of that, some care needs to be taken to ensure things
work as expected. The following discussion describes how each PSR bit
is handled.
======= =======================================================================
PSR.be Cleared when entering fsys-mode. A srlz.d instruction is used
to ensure the CPU is in little-endian mode before the first
load/store instruction is executed. PSR.be is normally NOT
restored upon return from an fsys-mode handler. In other
words, user-level code must not rely on PSR.be being preserved
across a system call.
PSR.up Unchanged.
PSR.ac Unchanged.
PSR.mfl Unchanged. Note: fsys-mode handlers must not write-registers!
PSR.mfh Unchanged. Note: fsys-mode handlers must not write-registers!
PSR.ic Unchanged. Note: fsys-mode handlers can clear the bit, if needed.
PSR.i Unchanged. Note: fsys-mode handlers can clear the bit, if needed.
PSR.pk Unchanged.
PSR.dt Unchanged.
PSR.dfl Unchanged. Note: fsys-mode handlers must not write-registers!
PSR.dfh Unchanged. Note: fsys-mode handlers must not write-registers!
PSR.sp Unchanged.
PSR.pp Unchanged.
PSR.di Unchanged.
PSR.si Unchanged.
PSR.db Unchanged. The kernel prevents user-level from setting a hardware
breakpoint that triggers at any privilege level other than
3 (user-mode).
PSR.lp Unchanged.
PSR.tb Lazy redirect. If a taken-branch trap occurs while in
fsys-mode, the trap-handler modifies the saved machine state
such that execution resumes in the gate page at
syscall_via_break(), with privilege level 3. Note: the
taken branch would occur on the branch invoking the
fsyscall-handler, at which point, by definition, a syscall
restart is still safe. If the system call number is invalid,
the fsys-mode handler will return directly to user-level. This
return will trigger a taken-branch trap, but since the trap is
taken _after_ restoring the privilege level, the CPU has already
left fsys-mode, so no special treatment is needed.
PSR.rt Unchanged.
PSR.cpl Cleared to 0.
PSR.is Unchanged (guaranteed to be 0 on entry to the gate page).
PSR.mc Unchanged.
PSR.it Unchanged (guaranteed to be 1).
PSR.id Unchanged. Note: the ia64 linux kernel never sets this bit.
PSR.da Unchanged. Note: the ia64 linux kernel never sets this bit.
PSR.dd Unchanged. Note: the ia64 linux kernel never sets this bit.
PSR.ss Lazy redirect. If set, "epc" will cause a Single Step Trap to
be taken. The trap handler then modifies the saved machine
state such that execution resumes in the gate page at
syscall_via_break(), with privilege level 3.
PSR.ri Unchanged.
PSR.ed Unchanged. Note: This bit could only have an effect if an fsys-mode
handler performed a speculative load that gets NaTted. If so, this
would be the normal & expected behavior, so no special treatment is
needed.
PSR.bn Unchanged. Note: fsys-mode handlers may clear the bit, if needed.
Doing so requires clearing PSR.i and PSR.ic as well.
PSR.ia Unchanged. Note: the ia64 linux kernel never sets this bit.
======= =======================================================================
Using fast system calls
=======================
To use fast system calls, userspace applications simply need to call
__kernel_syscall_via_epc(). For example
-- example fgettimeofday() call --
-- fgettimeofday.S --
::
#include <asm/asmmacro.h>
GLOBAL_ENTRY(fgettimeofday)
.prologue
.save ar.pfs, r11
mov r11 = ar.pfs
.body
mov r2 = 0xa000000000020660;; // gate address
// found by inspection of System.map for the
// __kernel_syscall_via_epc() function. See
// below for how to do this for real.
mov b7 = r2
mov r15 = 1087 // gettimeofday syscall
;;
br.call.sptk.many b6 = b7
;;
.restore sp
mov ar.pfs = r11
br.ret.sptk.many rp;; // return to caller
END(fgettimeofday)
-- end fgettimeofday.S --
In reality, getting the gate address is accomplished by two extra
values passed via the ELF auxiliary vector (include/asm-ia64/elf.h)
* AT_SYSINFO : is the address of __kernel_syscall_via_epc()
* AT_SYSINFO_EHDR : is the address of the kernel gate ELF DSO
The ELF DSO is a pre-linked library that is mapped in by the kernel at
the gate page. It is a proper ELF shared object so, with a dynamic
loader that recognises the library, you should be able to make calls to
the exported functions within it as with any other shared library.
AT_SYSINFO points into the kernel DSO at the
__kernel_syscall_via_epc() function for historical reasons (it was
used before the kernel DSO) and as a convenience.
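For reference, the auxiliary-vector lookup described above can be done with getauxval(3) on a reasonably recent glibc (a convenience that postdates this document); a minimal sketch::

/* Sketch: fetch the gate-page addresses from the ELF auxiliary vector
 * instead of hard-coding them as in the fgettimeofday.S example. */
#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long epc = getauxval(AT_SYSINFO);	/* __kernel_syscall_via_epc() */
	unsigned long dso = getauxval(AT_SYSINFO_EHDR);	/* kernel gate ELF DSO */

	printf("AT_SYSINFO      = %#lx\n", epc);
	printf("AT_SYSINFO_EHDR = %#lx\n", dso);
	return 0;
}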


@ -1,49 +0,0 @@
===========================================
Linux kernel release for the IA-64 Platform
===========================================
These are the release notes for Linux since version 2.4 for IA-64
platform. This document provides information specific to IA-64
ONLY; to get additional information about the Linux kernel, also
read the original Linux README provided with the kernel.
Installing the Kernel
=====================
- IA-64 kernel installation is the same as the other platforms, see
original README for details.
Software Requirements
=====================
Compiling and running this kernel requires an IA-64 compliant GCC
compiler, as well as various software packages also compiled with an
IA-64 compliant GCC compiler.
Configuring the Kernel
======================
Configuration is the same, see original README for details.
Compiling the Kernel:
- Compiling this kernel doesn't differ from other platforms, so read
the original README for details BUT make sure you have an IA-64
compliant GCC compiler.
IA-64 Specifics
===============
- General issues:
* Hardly any performance tuning has been done. Obvious targets
include the library routines (IP checksum, etc.). Less
obvious targets include making sure we don't flush the TLB
needlessly, etc.
* SMP locks cleanup/optimization
* IA32 support. Currently experimental. It mostly works.


@ -1,19 +0,0 @@
.. SPDX-License-Identifier: GPL-2.0
==================
IA-64 Architecture
==================
.. toctree::
:maxdepth: 1
ia64
aliasing
efirtc
err_inject
fsys
irq-redir
mca
serial
features


@ -1,80 +0,0 @@
==============================
IRQ affinity on IA64 platforms
==============================
07.01.2002, Erich Focht <efocht@ess.nec.de>
By writing to /proc/irq/IRQ#/smp_affinity the interrupt routing can be
controlled. The behavior on IA64 platforms is slightly different from
that described in Documentation/core-api/irq/irq-affinity.rst for i386 systems.
Because of the usage of SAPIC mode and physical destination mode the
IRQ target is one particular CPU and cannot be a mask of several
CPUs. Only the first non-zero bit is taken into account.
Usage examples
==============
The target CPU has to be specified as a hexadecimal CPU mask. The
first non-zero bit is the selected CPU. This format has been kept for
compatibility reasons with i386.
Set the delivery mode of interrupt 41 to fixed and route the
interrupts to CPU #3 (logical CPU number) (2^3=0x08)::
echo "8" >/proc/irq/41/smp_affinity
Set the default route for IRQ number 41 to CPU 6 in lowest priority
delivery mode (redirectable)::
echo "r 40" >/proc/irq/41/smp_affinity
The output of the command::
cat /proc/irq/IRQ#/smp_affinity
gives the target CPU mask for the specified interrupt vector. If the CPU
mask is preceded by the character "r", the interrupt is redirectable
(i.e. lowest priority mode routing is used), otherwise its route is
fixed.
Initialization and default behavior
===================================
If the platform features IRQ redirection (info provided by SAL) all
IO-SAPIC interrupts are initialized with CPU#0 as their default target
and the routing is the so called "lowest priority mode" (actually
fixed SAPIC mode with hint). The XTP chipset registers are used as hints
for the IRQ routing. Currently in Linux XTP registers can have three
values:
- minimal for an idle task,
- normal if any other task runs,
- maximal if the CPU is going to be switched off.
The IRQ is routed to the CPU with lowest XTP register value, the
search begins at the default CPU. Therefore most of the interrupts
will be handled by CPU #0.
If the platform doesn't feature interrupt redirection IOSAPIC fixed
routing is used. The target CPUs are distributed in a round robin
manner. IRQs will be routed only to the selected target CPUs. Check
with::
cat /proc/interrupts
Comments
========
On large (multi-node) systems it is recommended to route the IRQs to
the node to which the corresponding device is connected.
For systems like the NEC AzusA we get IRQ node-affinity for free. This
is because usually the chipsets on each node redirect the interrupts
only to their own CPUs (as they cannot see the XTP registers on the
other nodes).


@ -1,198 +0,0 @@
=============================================================
An ad-hoc collection of notes on IA64 MCA and INIT processing
=============================================================
Feel free to update it with notes about any area that is not clear.
---
MCA/INIT are completely asynchronous. They can occur at any time, when
the OS is in any state. Including when one of the cpus is already
holding a spinlock. Trying to get any lock from MCA/INIT state is
asking for deadlock. Also the state of structures that are protected
by locks is indeterminate, including linked lists.
---
The complicated ia64 MCA process. All of this is mandated by Intel's
specification for ia64 SAL, error recovery and unwind, it is not as
if we have a choice here.
* MCA occurs on one cpu, usually due to a double bit memory error.
This is the monarch cpu.
* SAL sends an MCA rendezvous interrupt (which is a normal interrupt)
to all the other cpus, the slaves.
* Slave cpus that receive the MCA interrupt call down into SAL, they
end up spinning disabled while the MCA is being serviced.
* If any slave cpu was already spinning disabled when the MCA occurred
then it cannot service the MCA interrupt. SAL waits ~20 seconds then
sends an unmaskable INIT event to the slave cpus that have not
already rendezvoused.
* Because MCA/INIT can be delivered at any time, including when the cpu
is down in PAL in physical mode, the registers at the time of the
event are _completely_ undefined. In particular the MCA/INIT
handlers cannot rely on the thread pointer, PAL physical mode can
(and does) modify TP. It is allowed to do that as long as it resets
TP on return. However MCA/INIT events expose us to these PAL
internal TP changes. Hence curr_task().
* If an MCA/INIT event occurs while the kernel was running (not user
space) and the kernel has called PAL then the MCA/INIT handler cannot
assume that the kernel stack is in a fit state to be used. Mainly
because PAL may or may not maintain the stack pointer internally.
Because the MCA/INIT handlers cannot trust the kernel stack, they
have to use their own, per-cpu stacks. The MCA/INIT stacks are
preformatted with just enough task state to let the relevant handlers
do their job.
* Unlike most other architectures, the ia64 struct task is embedded in
the kernel stack[1]. So switching to a new kernel stack means that
we switch to a new task as well. Because various bits of the kernel
assume that current points into the struct task, switching to a new
stack also means a new value for current.
* Once all slaves have rendezvoused and are spinning disabled, the
monarch is entered. The monarch now tries to diagnose the problem
and decide if it can recover or not.
* Part of the monarch's job is to look at the state of all the other
tasks. The only way to do that on ia64 is to call the unwinder,
as mandated by Intel.
* The starting point for the unwind depends on whether a task is
running or not. That is, whether it is on a cpu or is blocked. The
monarch has to determine whether or not a task is on a cpu before it
knows how to start unwinding it. The tasks that received an MCA or
INIT event are no longer running, they have been converted to blocked
tasks. But (and it's a big but), the cpus that received the MCA
rendezvous interrupt are still running on their normal kernel stacks!
* To distinguish between these two cases, the monarch must know which
tasks are on a cpu and which are not. Hence each slave cpu that
switches to an MCA/INIT stack, registers its new stack using
set_curr_task(), so the monarch can tell that the _original_ task is
no longer running on that cpu. That gives us a decent chance of
getting a valid backtrace of the _original_ task.
* MCA/INIT can be nested, to a depth of 2 on any cpu. In the case of a
nested error, we want diagnostics on the MCA/INIT handler that
failed, not on the task that was originally running. Again this
requires set_curr_task() so the MCA/INIT handlers can register their
own stack as running on that cpu. Then a recursive error gets a
trace of the failing handler's "task".
[1]
My (Keith Owens) original design called for ia64 to separate its
struct task and the kernel stacks. Then the MCA/INIT data would be
chained stacks like i386 interrupt stacks. But that required
radical surgery on the rest of ia64, plus extra hard wired TLB
entries with its associated performance degradation. David
Mosberger vetoed that approach. Which meant that separate kernel
stacks meant separate "tasks" for the MCA/INIT handlers.
---
INIT is less complicated than MCA. Pressing the nmi button or using
the equivalent command on the management console sends INIT to all
cpus. SAL picks one of the cpus as the monarch and the rest are
slaves. All the OS INIT handlers are entered at approximately the same
time. The OS monarch prints the state of all tasks and returns, after
which the slaves return and the system resumes.
At least that is what is supposed to happen. Alas there are broken
versions of SAL out there. Some drive all the cpus as monarchs. Some
drive them all as slaves. Some drive one cpu as monarch, wait for that
cpu to return from the OS then drive the rest as slaves. Some versions
of SAL cannot even cope with returning from the OS, they spin inside
SAL on resume. The OS INIT code has workarounds for some of these
broken SAL symptoms, but some simply cannot be fixed from the OS side.
---
The scheduler hooks used by ia64 (curr_task, set_curr_task) are layer
violations. Unfortunately MCA/INIT start off as massive layer
violations (can occur at _any_ time) and they build from there.
At least ia64 makes an attempt at recovering from hardware errors, but
it is a difficult problem because of the asynchronous nature of these
errors. When processing an unmaskable interrupt we sometimes need
special code to cope with our inability to take any locks.
---
How is ia64 MCA/INIT different from x86 NMI?
* x86 NMI typically gets delivered to one cpu. MCA/INIT gets sent to
all cpus.
* x86 NMI cannot be nested. MCA/INIT can be nested, to a depth of 2
per cpu.
* x86 has a separate struct task which points to one of multiple kernel
stacks. ia64 has the struct task embedded in the single kernel
stack, so switching stack means switching task.
* x86 does not call the BIOS so the NMI handler does not have to worry
about any registers having changed. MCA/INIT can occur while the cpu
is in PAL in physical mode, with undefined registers and an undefined
kernel stack.
* i386 backtrace is not very sensitive to whether a process is running
or not. ia64 unwind is very, very sensitive to whether a process is
running or not.
---
What happens when MCA/INIT is delivered while a cpu is running user
space code?
The user mode registers are stored in the RSE area of the MCA/INIT on
entry to the OS and are restored from there on return to SAL, so user
mode registers are preserved across a recoverable MCA/INIT. Since the
OS has no idea what unwind data is available for the user space stack,
MCA/INIT never tries to backtrace user space. Which means that the OS
does not bother making the user space process look like a blocked task,
i.e. the OS does not copy pt_regs and switch_stack to the user space
stack. Also the OS has no idea how big the user space RSE and memory
stacks are, which makes it too risky to copy the saved state to a user
mode stack.
---
How do we get a backtrace on the tasks that were running when MCA/INIT
was delivered?
mca.c:::ia64_mca_modify_original_stack(). That identifies and
verifies the original kernel stack, copies the dirty registers from
the MCA/INIT stack's RSE to the original stack's RSE, copies the
skeleton struct pt_regs and switch_stack to the original stack, fills
in the skeleton structures from the PAL minstate area and updates the
original stack's thread.ksp. That makes the original stack look
exactly like any other blocked task, i.e. it now appears to be
sleeping. To get a backtrace, just start with thread.ksp for the
original task and unwind like any other sleeping task.
---
How do we identify the tasks that were running when MCA/INIT was
delivered?
If the previous task has been verified and converted to a blocked
state, then sos->prev_task on the MCA/INIT stack is updated to point to
the previous task. You can look at that field in dumps or debuggers.
To help distinguish between the handler and the original tasks,
handlers have _TIF_MCA_INIT set in thread_info.flags.
The sos data is always in the MCA/INIT handler stack, at offset
MCA_SOS_OFFSET. You can get that value from mca_asm.h or calculate it
as KERNEL_STACK_SIZE - sizeof(struct pt_regs) - sizeof(struct
ia64_sal_os_state), with 16 byte alignment for all structures.
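Spelled out as code, the calculation described above looks roughly like this (a sketch of the arithmetic only; the authoritative macros live in mca_asm.h)::

/* Sketch: each intermediate offset is rounded down to a 16-byte boundary. */
#define ALIGN16(x)		((x) & ~15UL)
#define MCA_PT_REGS_OFFSET	ALIGN16(KERNEL_STACK_SIZE - sizeof(struct pt_regs))
#define MCA_SOS_OFFSET		ALIGN16(MCA_PT_REGS_OFFSET - \
					sizeof(struct ia64_sal_os_state))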
Also the comm field of the MCA/INIT task is modified to include the pid
of the original task, for humans to use. For example, a comm field of
'MCA 12159' means that pid 12159 was running when the MCA was
delivered.


@ -1,165 +0,0 @@
==============
Serial Devices
==============
Serial Device Naming
====================
As of 2.6.10, serial devices on ia64 are named based on the
order of ACPI and PCI enumeration. The first device in the
ACPI namespace (if any) becomes /dev/ttyS0, the second becomes
/dev/ttyS1, etc., and PCI devices are named sequentially
starting after the ACPI devices.
Prior to 2.6.10, there were confusing exceptions to this:
- Firmware on some machines (mostly from HP) provides an HCDP
table[1] that tells the kernel about devices that can be used
as a serial console. If the user specified "console=ttyS0"
or the EFI ConOut path contained only UART devices, the
kernel registered the device described by the HCDP as
/dev/ttyS0.
- If there was no HCDP, we assumed there were UARTs at the
legacy COM port addresses (I/O ports 0x3f8 and 0x2f8), so
the kernel registered those as /dev/ttyS0 and /dev/ttyS1.
Any additional ACPI or PCI devices were registered sequentially
after /dev/ttyS0 as they were discovered.
With an HCDP, device names changed depending on EFI configuration
and "console=" arguments. Without an HCDP, device names didn't
change, but we registered devices that might not really exist.
For example, an HP rx1600 with a single built-in serial port
(described in the ACPI namespace) plus an MP[2] (a PCI device) has
these ports:
========== ========== ============ ============ =======
Type MMIO pre-2.6.10 pre-2.6.10 2.6.10+
address
(EFI console (EFI console
on builtin) on MP port)
========== ========== ============ ============ =======
builtin 0xff5e0000 ttyS0 ttyS1 ttyS0
MP UPS 0xf8031000 ttyS1 ttyS2 ttyS1
MP Console 0xf8030000 ttyS2 ttyS0 ttyS2
MP 2 0xf8030010 ttyS3 ttyS3 ttyS3
MP 3 0xf8030038 ttyS4 ttyS4 ttyS4
========== ========== ============ ============ =======
Console Selection
=================
EFI knows what your console devices are, but it doesn't tell the
kernel quite enough to actually locate them. The DIG64 HCDP
table[1] does tell the kernel where potential serial console
devices are, but not all firmware supplies it. Also, EFI supports
multiple simultaneous consoles and doesn't tell the kernel which
should be the "primary" one.
So how do you tell Linux which console device to use?
- If your firmware supplies the HCDP, it is simplest to
configure EFI with a single device (either a UART or a VGA
card) as the console. Then you don't need to tell Linux
anything; the kernel will automatically use the EFI console.
(This works only in 2.6.6 or later; prior to that you had
to specify "console=ttyS0" to get a serial console.)
- Without an HCDP, Linux defaults to a VGA console unless you
specify a "console=" argument.
NOTE: Don't assume that a serial console device will be /dev/ttyS0.
It might be ttyS1, ttyS2, etc. Make sure you have the appropriate
entries in /etc/inittab (for getty) and /etc/securetty (to allow
root login).
Early Serial Console
====================
The kernel can't start using a serial console until it knows where
the device lives. Normally this happens when the driver enumerates
all the serial devices, which can happen a minute or more after the
kernel starts booting.
2.6.10 and later kernels have an "early uart" driver that works
very early in the boot process. The kernel will automatically use
this if the user supplies an argument like "console=uart,io,0x3f8",
or if the EFI console path contains only a UART device and the
firmware supplies an HCDP.
Troubleshooting Serial Console Problems
=======================================
No kernel output after elilo prints "Uncompressing Linux... done":
- You specified "console=ttyS0" but Linux changed the device
to which ttyS0 refers. Configure exactly one EFI console
device[3] and remove the "console=" option.
- The EFI console path contains both a VGA device and a UART.
EFI and elilo use both, but Linux defaults to VGA. Remove
the VGA device from the EFI console path[3].
- Multiple UARTs selected as EFI console devices. EFI and
elilo use all selected devices, but Linux uses only one.
Make sure only one UART is selected in the EFI console
path[3].
- You're connected to an HP MP port[2] but have a non-MP UART
selected as EFI console device. EFI uses the MP as a
console device even when it isn't explicitly selected.
Either move the console cable to the non-MP UART, or change
the EFI console path[3] to the MP UART.
Long pause (60+ seconds) between "Uncompressing Linux... done" and
start of kernel output:
- No early console because you used "console=ttyS<n>". Remove
the "console=" option if your firmware supplies an HCDP.
- If you don't have an HCDP, the kernel doesn't know where
your console lives until the driver discovers serial
devices. Use "console=uart,io,0x3f8" (or appropriate
address for your machine).
Kernel and init script output works fine, but no "login:" prompt:
- Add getty entry to /etc/inittab for console tty. Look for
the "Adding console on ttyS<n>" message that tells you which
device is the console.
"login:" prompt, but can't login as root:
- Add entry to /etc/securetty for console tty.
No ACPI serial devices found in 2.6.17 or later:
- Turn on CONFIG_PNP and CONFIG_PNPACPI. Prior to 2.6.17, ACPI
serial devices were discovered by 8250_acpi. In 2.6.17,
8250_acpi was replaced by the combination of 8250_pnp and
CONFIG_PNPACPI.
[1]
http://www.dig64.org/specifications/agreement
The table was originally defined as the "HCDP" for "Headless
Console/Debug Port." The current version is the "PCDP" for
"Primary Console and Debug Port Devices."
[2]
The HP MP (management processor) is a PCI device that provides
several UARTs. One of the UARTs is often used as a console; the
EFI Boot Manager identifies it as "Acpi(HWP0002,700)/Pci(...)/Uart".
The external connection is usually a 25-pin connector, and a
special dongle converts that to three 9-pin connectors, one of
which is labelled "Console."
[3]
EFI console devices are configured using the EFI Boot Manager
"Boot option maintenance" menu. You may have to interrupt the
boot sequence to use this menu, and you will have to reset the
box after changing console configuration.


@ -12,7 +12,6 @@ implementation.
arc/index
arm/index
arm64/index
ia64/index
loongarch/index
m68k/index
mips/index


@ -80,9 +80,6 @@ ionice.c tool::
#elif defined(__x86_64__)
#define __NR_ioprio_set 251
#define __NR_ioprio_get 252
#elif defined(__ia64__)
#define __NR_ioprio_set 1274
#define __NR_ioprio_get 1275
#else
#error "Unsupported arch"
#endif


@ -40,12 +40,6 @@ Command Line Switches
supplied here is lower than the number of physically available CPUs, then
those CPUs can not be brought online later.
``additional_cpus=n``
Use this to limit hotpluggable CPUs. This option sets
``cpu_possible_mask = cpu_present_mask + additional_cpus``
This option is limited to the IA64 architecture.
``possible_cpus=n``
This option sets ``possible_cpus`` bits in ``cpu_possible_mask``.


@ -23,9 +23,9 @@ Retrieving a full system memory dump is also possible over the FireWire,
using data transfer rates in the order of 10MB/s or more.
With most FireWire controllers, memory access is limited to the low 4 GB
of physical address space. This can be a problem on IA64 machines where
memory is located mostly above that limit, but it is rarely a problem on
more common hardware such as x86, x86-64 and PowerPC.
of physical address space. This can be a problem on machines where memory is
located mostly above that limit, but it is rarely a problem on more common
hardware such as x86, x86-64 and PowerPC.
At least LSI FW643e and FW643e2 controllers are known to support access to
physical addresses above 4 GB, but this feature is currently not enabled by


@ -12,7 +12,6 @@
| arm64: | TODO |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | TODO |
| m68k: | TODO |
| microblaze: | TODO |


@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |


@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | ok |
| ia64: | ok |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |


@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | TODO |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | ok |
| ia64: | ok |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | TODO |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | TODO |
| m68k: | TODO |
| microblaze: | ok |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | ok |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | ok |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | ok |

View File

@ -12,7 +12,6 @@
| arm64: | TODO |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | ok |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | ok |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | TODO |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | TODO |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | TODO |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | TODO |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | ok |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | TODO |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | ok |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | ok |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | ok |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -35,7 +35,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | TODO |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | .. |
| hexagon: | .. |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | .. |
| microblaze: | .. |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | ok |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | ok |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | ok |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | .. |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | ok |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -13,7 +13,6 @@
| arm64: | ok |
| csky: | ok |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | TODO |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | ok |
| loongarch: | TODO |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | .. |
| hexagon: | .. |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | .. |
| microblaze: | .. |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | TODO |
| m68k: | .. |
| microblaze: | .. |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | TODO |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -12,7 +12,6 @@
| arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |

View File

@ -53,7 +53,7 @@ knowledge about the kernel Makefiles, plus detailed knowledge about the
public interface for kbuild.
*Arch developers* are people who work on an entire architecture, such
as sparc or ia64. Arch developers need to know about the arch Makefile
as sparc or x86. Arch developers need to know about the arch Makefile
as well as kbuild Makefiles.
*Kbuild developers* are people who work on the kernel build system itself.

View File

@ -64,8 +64,8 @@ c. Multi-buffer receive mode. Scattering of packet across multiple
IBM xSeries).
d. MSI/MSI-X. Can be enabled on platforms which support this feature
(IA64, Xeon) resulting in noticeable performance improvement(up to 7%
on certain platforms).
resulting in noticeable performance improvement (up to 7% on certain
platforms).
e. Statistics. Comprehensive MAC-level and software statistics displayed
using "ethtool -S" option.

View File

@ -10,7 +10,7 @@ Context switch
By default, the switch_to arch function is called with the runqueue
locked. This is usually not a problem unless switch_to may need to
take the runqueue lock. This is usually due to a wake up operation in
the context switch. See arch/ia64/include/asm/switch_to.h for an example.
the context switch.
To request the scheduler call switch_to with the runqueue unlocked,
you must `#define __ARCH_WANT_UNLOCKED_CTXSW` in a header file
@ -68,7 +68,5 @@ Possible arch/ problems
Possible arch problems I found (and either tried to fix or didn't):
ia64 - is safe_halt call racy vs interrupts? (does it sleep?) (See #4a)
sparc - IRQs on at this point(?), change local_irq_save to _disable.
- TODO: needs secondary CPUs to disable preempt (See #1)
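Relating to the first hunk of this file: a minimal sketch of what an architecture opting into unlocked context switches would put in its switch_to header. The file name, include guard, and the __switch_to() signature are illustrative and not taken from any particular port:

/* arch/<arch>/include/asm/switch_to.h -- illustrative fragment */
#ifndef _ASM_SWITCH_TO_H
#define _ASM_SWITCH_TO_H

struct task_struct;
extern struct task_struct *__switch_to(struct task_struct *prev,
				       struct task_struct *next);

/* Ask the scheduler to call switch_to() with the runqueue unlocked. */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define switch_to(prev, next, last) \
	((last) = __switch_to((prev), (next)))

#endif /* _ASM_SWITCH_TO_H */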

View File

@ -315,7 +315,6 @@ architectures:
- i386 (Supports jump optimization)
- x86_64 (AMD-64, EM64T) (Supports jump optimization)
- ppc64
- ia64 (Does not support probes on instruction slot1.)
- sparc64 (Return probes not yet implemented.)
- arm
- ppc

View File

@ -18,7 +18,6 @@
TODOList:
* arm/index
* ia64/index
* m68k/index
* nios2/index
* powerpc/index

View File

@ -49,12 +49,6 @@ A newer use of CPU hotplug support is its application in SMP suspend/resume support
limit the total number of CPUs the kernel will support. If the number supplied here is lower than the number of physically available CPUs, then those CPUs
can not be brought online later.
``additional_cpus=n``
Use this to limit hotpluggable CPUs. This option sets
``cpu_possible_mask = cpu_present_mask + additional_cpus``
This option is limited to the IA64 architecture.
``possible_cpus=n``
This option sets the ``possible_cpus`` bits in ``cpu_possible_mask``.


View File

@ -20,8 +20,7 @@
==========
1. Runqueue locking
By default, the switch_to arch function is called with the runqueue locked. This is usually not a problem unless
switch_to may need to take the runqueue lock. This is usually due to a wake up operation in the context switch. See
arch/ia64/include/asm/switch_to.h for an example.
switch_to may need to take the runqueue lock. This is usually due to a wake up operation in the context switch.
To request the scheduler call switch_to with the runqueue unlocked, you must, in a header file
`#define __ARCH_WANT_UNLOCKED_CTXSW` (usually the one that defines switch_to)
@ -68,7 +67,5 @@ arch/x86/kernel/process.c has examples of polling and sleeping idle functions.
Possible arch problems I found (and either tried to fix or didn't):
ia64 - is safe_halt call racy vs interrupts? (does it sleep?) (See #4a)
sparc - IRQs on at this point(?), change local_irq_save to _disable.
- TODO: needs secondary CPUs to disable preempt (See #1)

View File

@ -10029,12 +10029,6 @@ F: Documentation/driver-api/i3c
F: drivers/i3c/
F: include/linux/i3c/
IA64 (Itanium) PLATFORM
L: linux-ia64@vger.kernel.org
S: Orphan
F: Documentation/arch/ia64/
F: arch/ia64/
IBM Operation Panel Input Driver
M: Eddie James <eajames@linux.ibm.com>
L: linux-input@vger.kernel.org
@ -16369,11 +16363,6 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/muxes/i2c-mux-pca9541.c
PCDP - PRIMARY CONSOLE AND DEBUG PORT
M: Khalid Aziz <khalid@gonehiking.org>
S: Maintained
F: drivers/firmware/pcdp.*
PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
M: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
M: Pali Rohár <pali@kernel.org>

View File

@ -378,7 +378,7 @@ include $(srctree)/scripts/subarch.include
# When performing cross compilation for other architectures ARCH shall be set
# to the target architecture. (See arch/* for the possibilities).
# ARCH can be set during invocation of make:
# make ARCH=ia64
# make ARCH=arm64
# Another way is to have ARCH set in the environment.
# The default ARCH is the host where make is executed.
@ -386,7 +386,7 @@ include $(srctree)/scripts/subarch.include
# during compilation. Only gcc and related bin-utils executables
# are prefixed with $(CROSS_COMPILE).
# CROSS_COMPILE can be set on the command line
# make CROSS_COMPILE=ia64-linux-
# make CROSS_COMPILE=aarch64-linux-gnu-
# Alternatively CROSS_COMPILE can be set in the environment.
# Default value for CROSS_COMPILE is not to prefix executables
# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
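Putting the two settings together, a typical cross build (the target and toolchain prefix here are only examples) would be run as:

    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- defconfig
    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- -j$(nproc)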

View File

@ -1088,7 +1088,6 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
config PAGE_SIZE_LESS_THAN_64KB
def_bool y
depends on !ARM64_64K_PAGES
depends on !IA64_PAGE_SIZE_64KB
depends on !PAGE_SIZE_64KB
depends on !PARISC_PAGE_SIZE_64KB
depends on PAGE_SIZE_LESS_THAN_256KB

View File

@ -334,7 +334,7 @@
401 common io_submit sys_io_submit
402 common io_cancel sys_io_cancel
405 common exit_group sys_exit_group
406 common lookup_dcookie sys_lookup_dcookie
406 common lookup_dcookie sys_ni_syscall
407 common epoll_create sys_epoll_create
408 common epoll_ctl sys_epoll_ctl
409 common epoll_wait sys_epoll_wait
@ -492,7 +492,7 @@
560 common set_mempolicy_home_node sys_ni_syscall
561 common cachestat sys_cachestat
562 common fchmodat2 sys_fchmodat2
# 563 reserved for map_shadow_stack
563 common map_shadow_stack sys_map_shadow_stack
564 common futex_wake sys_futex_wake
565 common futex_wait sys_futex_wait
566 common futex_requeue sys_futex_requeue

View File

@ -263,7 +263,7 @@
246 common io_submit sys_io_submit
247 common io_cancel sys_io_cancel
248 common exit_group sys_exit_group
249 common lookup_dcookie sys_lookup_dcookie
249 common lookup_dcookie sys_ni_syscall
250 common epoll_create sys_epoll_create
251 common epoll_ctl sys_epoll_ctl sys_oabi_epoll_ctl
252 common epoll_wait sys_epoll_wait
@ -466,6 +466,7 @@
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
452 common fchmodat2 sys_fchmodat2
453 common map_shadow_stack sys_map_shadow_stack
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue

View File

@ -508,8 +508,8 @@ __SYSCALL(__NR_io_submit, compat_sys_io_submit)
__SYSCALL(__NR_io_cancel, sys_io_cancel)
#define __NR_exit_group 248
__SYSCALL(__NR_exit_group, sys_exit_group)
#define __NR_lookup_dcookie 249
__SYSCALL(__NR_lookup_dcookie, compat_sys_lookup_dcookie)
/* 249 was lookup_dcookie */
__SYSCALL(249, sys_ni_syscall)
#define __NR_epoll_create 250
__SYSCALL(__NR_epoll_create, sys_epoll_create)
#define __NR_epoll_ctl 251
@ -911,6 +911,8 @@ __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
__SYSCALL(__NR_cachestat, sys_cachestat)
#define __NR_fchmodat2 452
__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
#define __NR_map_shadow_stack 453
__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack)
#define __NR_futex_wake 454
__SYSCALL(__NR_futex_wake, sys_futex_wake)
#define __NR_futex_wait 455
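Until libc wrappers exist, the newly reserved number is reachable from userspace through the raw syscall interface. A hedged sketch: it assumes the running kernel actually implements shadow stacks for the target (these table entries alone only reserve the number) and borrows the x86 SHADOW_STACK_SET_TOKEN flag value:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SHADOW_STACK_SET_TOKEN
#define SHADOW_STACK_SET_TOKEN 0x1	/* x86 uapi value; assumed elsewhere */
#endif

int main(void)
{
#ifdef __NR_map_shadow_stack
	/* Ask the kernel for a 4 KiB shadow stack with a restore token. */
	long addr = syscall(__NR_map_shadow_stack, 0UL, 4096UL,
			    SHADOW_STACK_SET_TOKEN);

	if (addr == -1)
		perror("map_shadow_stack");
	else
		printf("shadow stack mapped at %#lx\n", (unsigned long)addr);
#else
	fprintf(stderr, "__NR_map_shadow_stack not known to these headers\n");
#endif
	return 0;
}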

View File

@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Ptrace definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
#ifndef _ASM_HEXAGON_PTRACE_H
#define _ASM_HEXAGON_PTRACE_H
#include <uapi/asm/ptrace.h>
/* kprobe-based event tracer support */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define current_pt_regs() \
((struct pt_regs *) \
((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
#if CONFIG_HEXAGON_ARCH_VERSION >= 4
#define arch_has_single_step() (1)
#endif
#endif
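A rough sketch of how a kprobe-based tracer might consume the helpers declared above. The register name and the direct offset arithmetic are assumptions made for the illustration; the returned value is a byte offset into struct pt_regs, and a negative return means the name was not recognized:

#include <linux/ptrace.h>

/* Illustrative only: resolve a register by name, then read it out of
 * the saved register file using the returned byte offset. */
static unsigned long read_named_reg(struct pt_regs *regs, const char *name)
{
	int off = regs_query_register_offset(name);	/* e.g. "r00" -- assumed */

	if (off < 0)
		return 0;
	return *(unsigned long *)((char *)regs + off);
}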

View File

@ -29,17 +29,4 @@
#define profile_pc(regs) instruction_pointer(regs)
/* kprobe-based event tracer support */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define current_pt_regs() \
((struct pt_regs *) \
((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
#if CONFIG_HEXAGON_ARCH_VERSION >= 4
#define arch_has_single_step() (1)
#endif
#endif

View File

@ -1,3 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y += kernel/ mm/
obj-$(CONFIG_IA64_SGI_UV) += uv/

View File

@ -1,394 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
config PGTABLE_LEVELS
int "Page Table Levels" if !IA64_PAGE_SIZE_64KB
range 3 4 if !IA64_PAGE_SIZE_64KB
default 3
menu "Processor type and features"
config IA64
bool
select ARCH_BINFMT_ELF_EXTRA_PHDRS
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_DMA_MARK_CLEAN
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ACPI
select ACPI_NUMA if NUMA
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_SUPPORTS_ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select FORCE_PCI
select PCI_DOMAINS if PCI
select PCI_MSI
select PCI_SYSCALL if PCI
select HAS_IOPORT
select HAVE_ASM_MODVERSIONS
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_EXIT_THREAD
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
select HAVE_FUNCTION_TRACER
select HAVE_SETUP_PER_CPU_AREA
select TTY
select HAVE_ARCH_TRACEHOOK
select HAVE_FUNCTION_DESCRIPTORS
select HAVE_VIRT_CPU_ACCOUNTING
select HUGETLB_PAGE_SIZE_VARIABLE if HUGETLB_PAGE
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_LEGACY
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_IOMAP
select GENERIC_IOREMAP
select GENERIC_SMP_IDLE_THREAD
select ARCH_TASK_STRUCT_ON_STACK
select ARCH_TASK_STRUCT_ALLOCATOR
select ARCH_THREAD_STACK_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
select GENERIC_TIME_VSYSCALL
select LEGACY_TIMER_TICK
select SWIOTLB
select SYSCTL_ARCH_UNALIGN_NO_WARN
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_AUDITSYSCALL
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
select NUMA if !FLATMEM
select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
select ZONE_DMA32
select FUNCTION_ALIGNMENT_32B
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
the 32-bit X86 line. The IA-64 Linux project has a home
page at <http://www.linuxia64.org/> and a mailing list at
<linux-ia64@vger.kernel.org>.
config 64BIT
bool
select ATA_NONSTANDARD if ATA
default y
config MMU
bool
default y
config STACKTRACE_SUPPORT
def_bool y
config GENERIC_LOCKBREAK
def_bool n
config GENERIC_CALIBRATE_DELAY
bool
default y
config DMI
bool
default y
select DMI_SCAN_MACHINE_NON_EFI_FALLBACK
config EFI
bool
select UCS2_STRING
default y
config SCHED_OMIT_FRAME_POINTER
bool
default y
config IA64_UNCACHED_ALLOCATOR
bool
select GENERIC_ALLOCATOR
config ARCH_USES_PG_UNCACHED
def_bool y
depends on IA64_UNCACHED_ALLOCATOR
config AUDIT_ARCH
bool
default y
choice
prompt "Processor type"
default ITANIUM
config ITANIUM
bool "Itanium"
help
Select your IA-64 processor type. The default is Itanium.
This choice is safe for all IA-64 systems, but may not perform
optimally on systems with, say, Itanium 2 or newer processors.
config MCKINLEY
bool "Itanium 2"
help
Select this to configure for an Itanium 2 (McKinley) processor.
endchoice
choice
prompt "Kernel page size"
default IA64_PAGE_SIZE_16KB
config IA64_PAGE_SIZE_4KB
bool "4KB"
help
This lets you select the page size of the kernel. For best IA-64
performance, a page size of 8KB or 16KB is recommended. For best
IA-32 compatibility, a page size of 4KB should be selected (the vast
majority of IA-32 binaries work perfectly fine with a larger page
size). For Itanium 2 or newer systems, a page size of 64KB can also
be selected.
4KB For best IA-32 compatibility
8KB For best IA-64 performance
16KB For best IA-64 performance
64KB Requires Itanium 2 or newer processor.
If you don't know what to do, choose 16KB.
config IA64_PAGE_SIZE_8KB
bool "8KB"
config IA64_PAGE_SIZE_16KB
bool "16KB"
config IA64_PAGE_SIZE_64KB
depends on !ITANIUM
bool "64KB"
endchoice
source "kernel/Kconfig.hz"
config IA64_BRL_EMU
bool
depends on ITANIUM
default y
# align cache-sensitive data to 128 bytes
config IA64_L1_CACHE_SHIFT
int
default "7" if MCKINLEY
default "6" if ITANIUM
config IA64_SGI_UV
bool "SGI-UV support"
help
Selecting this option will add specific support for running on SGI
UV based systems. If you have an SGI UV system or are building a
distro kernel, select this option.
config IA64_HP_SBA_IOMMU
bool "HP SBA IOMMU support"
select DMA_OPS
default y
help
Say Y here to add support for the SBA IOMMU found on HP zx1 and
sx1000 systems. If you're unsure, answer Y.
config IA64_CYCLONE
bool "Cyclone (EXA) Time Source support"
help
Say Y here to enable support for IBM EXA Cyclone time source.
If you're unsure, answer N.
config ARCH_FORCE_MAX_ORDER
int
default "16" if HUGETLB_PAGE
default "10"
config SMP
bool "Symmetric multi-processing support"
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
than one CPU, say Y.
If you say N here, the kernel will run on single and multiprocessor
systems, but will use only one CPU of a multiprocessor system. If
you say Y here, the kernel will run on many, but not all,
single processor systems. On a single processor system, the kernel
will run faster if you say N here.
See also the SMP-HOWTO available at
<http://www.tldp.org/docs.html#howto>.
If you don't know what to do here, say N.
config NR_CPUS
int "Maximum number of CPUs (2-4096)"
range 2 4096
depends on SMP
default "4096"
help
You should set this to the number of CPUs in your system, but
keep in mind that a kernel compiled for, e.g., 2 CPUs will boot but
only use 2 CPUs on a >2 CPU system. Setting this to a value larger
than 64 will cause the use of a CPU mask array, causing a small
performance hit.
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
default n
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
config SCHED_SMT
bool "SMT scheduler support"
depends on SMP
help
Improves the CPU scheduler's decision making when dealing with
Intel IA64 chips with MultiThreading at a cost of slightly increased
overhead in some places. If unsure say N here.
config PERMIT_BSP_REMOVE
bool "Support removal of Bootstrap Processor"
depends on HOTPLUG_CPU
default n
help
Say Y here if your platform SAL will support removal of BSP with HOTPLUG_CPU
support.
config FORCE_CPEI_RETARGET
bool "Force assumption that CPEI can be re-targeted"
depends on PERMIT_BSP_REMOVE
default n
help
Say Y if you need to force the assumption that CPEI can be re-targeted to
any cpu in the system. This hint is available via ACPI 3.0 specifications.
Tiger4 systems are capable of re-directing CPEI to any CPU other than BSP.
This option it useful to enable this feature on older BIOS's as well.
You can also enable this by using boot command line option force_cpei=1.
config ARCH_SELECT_MEMORY_MODEL
def_bool y
config ARCH_FLATMEM_ENABLE
def_bool y
config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_VMEMMAP_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool y
depends on ARCH_SPARSEMEM_ENABLE
config NUMA
bool "NUMA support"
depends on !FLATMEM
select SMP
select USE_PERCPU_NUMA_NODE_ID
help
Say Y to compile the kernel to support NUMA (Non-Uniform Memory
Access). This option is for configuring high-end multiprocessor
server systems. If in doubt, say N.
config NODES_SHIFT
int "Max num nodes shift(3-10)"
range 3 10
default "10"
depends on NUMA
help
This option specifies the maximum number of nodes in your SSI system.
MAX_NUMNODES will be 2^(This value).
If in doubt, use the default.
config HAVE_ARCH_NODEDATA_EXTENSION
def_bool y
depends on NUMA
config HAVE_MEMORYLESS_NODES
def_bool NUMA
config ARCH_PROC_KCORE_TEXT
def_bool y
depends on PROC_KCORE
config IA64_MCA_RECOVERY
bool "MCA recovery from errors other than TLB."
config IA64_PALINFO
tristate "/proc/pal support"
help
If you say Y here, you are able to get PAL (Processor Abstraction
Layer) information in /proc/pal. This contains useful information
about the processors in your systems, such as cache and TLB sizes
and the PAL firmware version in use.
To use this option, you have to ensure that the "/proc file system
support" (CONFIG_PROC_FS) is enabled, too.
config IA64_MC_ERR_INJECT
tristate "MC error injection support"
help
Adds support for MC error injection. If enabled, the kernel
will provide a sysfs interface for user applications to
call MC error injection PAL procedures to inject various errors.
This is a useful tool for MCA testing.
If you're unsure, do not select this option.
config IA64_ESI
bool "ESI (Extensible SAL Interface) support"
help
If you say Y here, support is built into the kernel to
make ESI calls. ESI calls are used to support vendor-specific
firmware extensions, such as the ability to inject memory-errors
for test-purposes. If you're unsure, say N.
config IA64_HP_AML_NFW
bool "Support ACPI AML calls to native firmware"
help
This driver installs a global ACPI Operation Region handler for
region 0xA1. AML methods can use this OpRegion to call arbitrary
native firmware functions. The driver installs the OpRegion
handler if there is an HPQ5001 device or if the user supplies
the "force" module parameter, e.g., with the "aml_nfw.force"
kernel command line option.
endmenu
config ARCH_SUPPORTS_KEXEC
def_bool !SMP || HOTPLUG_CPU
config ARCH_SUPPORTS_CRASH_DUMP
def_bool IA64_MCA_RECOVERY && (!SMP || HOTPLUG_CPU)
menu "Power management and ACPI options"
source "kernel/power/Kconfig"
source "drivers/acpi/Kconfig"
if PM
menu "CPU Frequency scaling"
source "drivers/cpufreq/Kconfig"
endmenu
endif
endmenu
config MSPEC
tristate "Memory special operations driver"
depends on IA64
select IA64_UNCACHED_ALLOCATOR
help
If you have an ia64 and you want to enable memory special
operations support (formerly known as fetchop), say Y here,
otherwise say N.

View File

@ -1,55 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
choice
prompt "Physical memory granularity"
default IA64_GRANULE_64MB
config IA64_GRANULE_16MB
bool "16MB"
help
IA-64 identity-mapped regions use a large page size called "granules".
Select "16MB" for a small granule size.
Select "64MB" for a large granule size. This is the current default.
config IA64_GRANULE_64MB
bool "64MB"
depends on BROKEN
endchoice
config IA64_PRINT_HAZARDS
bool "Print possible IA-64 dependency violations to console"
depends on DEBUG_KERNEL
help
Selecting this option prints more information for Illegal Dependency
Faults, that is, for Read-after-Write (RAW), Write-after-Write (WAW),
or Write-after-Read (WAR) violations. This option is ignored if you
are compiling for an Itanium A step processor
(CONFIG_ITANIUM_ASTEP_SPECIFIC). If you're unsure, select Y.
config DISABLE_VHPT
bool "Disable VHPT"
depends on DEBUG_KERNEL
help
The Virtual Hash Page Table (VHPT) enhances virtual address
translation performance. Normally you want the VHPT active but you
can select this option to disable the VHPT for debugging. If you're
unsure, answer N.
config IA64_DEBUG_CMPXCHG
bool "Turn on compare-and-exchange bug checking (slow!)"
depends on DEBUG_KERNEL && PRINTK
help
Selecting this option turns on bug checking for the IA-64
compare-and-exchange instructions. This is slow! Itaniums
from step B3 or later don't have this problem. If you're unsure,
select N.
config IA64_DEBUG_IRQ
bool "Turn on irq debug checks (slow!)"
depends on DEBUG_KERNEL
help
Selecting this option turns on bug checking for the IA-64 irq_save
and restore instructions. It's useful for tracking down spinlock
problems, but slow! If you're unsure, select N.

View File

@ -1,82 +0,0 @@
#
# ia64/Makefile
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1998-2004 by David Mosberger-Tang <davidm@hpl.hp.com>
#
KBUILD_DEFCONFIG := generic_defconfig
NM := $(CROSS_COMPILE)nm -B
CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
OBJCOPYFLAGS := --strip-all
LDFLAGS_vmlinux := -static
KBUILD_AFLAGS_KERNEL := -mconstant-gp
EXTRA :=
cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f12-f15,f32-f127 \
-frename-registers -fno-optimize-sibling-calls
KBUILD_CFLAGS_KERNEL := -mconstant-gp
GAS_STATUS = $(shell $(srctree)/arch/ia64/scripts/check-gas "$(CC)" "$(OBJDUMP)")
KBUILD_CPPFLAGS += $(shell $(srctree)/arch/ia64/scripts/toolchain-flags "$(CC)" "$(OBJDUMP)" "$(READELF)")
ifeq ($(GAS_STATUS),buggy)
$(error Sorry, you need a newer version of the assember, one that is built from \
a source-tree that post-dates 18-Dec-2002. You can find a pre-compiled \
static binary of such an assembler at: \
\
ftp://ftp.hpl.hp.com/pub/linux-ia64/gas-030124.tar.gz)
endif
quiet_cmd_gzip = GZIP $@
cmd_gzip = cat $(real-prereqs) | $(KGZIP) -n -f -9 > $@
quiet_cmd_objcopy = OBJCOPY $@
cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
KBUILD_CFLAGS += $(cflags-y)
libs-y += arch/ia64/lib/
drivers-y += arch/ia64/pci/ arch/ia64/hp/common/
PHONY += compressed check
all: compressed unwcheck
compressed: vmlinux.gz
vmlinuz: vmlinux.gz
vmlinux.gz: vmlinux.bin FORCE
$(call if_changed,gzip)
vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
unwcheck: vmlinux
-$(Q)READELF=$(READELF) $(PYTHON3) $(srctree)/arch/ia64/scripts/unwcheck.py $<
archheaders:
$(Q)$(MAKE) $(build)=arch/ia64/kernel/syscalls all
CLEAN_FILES += vmlinux.gz
install: KBUILD_IMAGE := vmlinux.gz
install:
$(call cmd,install)
define archhelp
echo '* compressed - Build compressed kernel image'
echo ' install - Install compressed kernel image'
echo '* unwcheck - Check vmlinux for invalid unwind info'
endef

View File

@ -1,102 +0,0 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_SGI_PARTITION=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_PREEMPT=y
CONFIG_IA64_PALINFO=y
CONFIG_BINFMT_MISC=m
CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
CONFIG_ATA=m
CONFIG_ATA_GENERIC=m
CONFIG_ATA_PIIX=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SPI_ATTRS=m
CONFIG_SCSI_QLOGIC_1280=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_INPUT_EVDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_HW_RANDOM is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_AGP=m
CONFIG_AGP_I460=m
CONFIG_DRM=m
CONFIG_DRM_R128=m
CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_CS4281=m
CONFIG_USB_HIDDEV=y
CONFIG_USB=m
CONFIG_USB_MON=m
CONFIG_USB_UHCI_HCD=m
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=m
CONFIG_USB_STORAGE=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_UDF_FS=m
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V4=m
CONFIG_NFSD=m
CONFIG_NFSD_V4=y
CONFIG_CIFS=m
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y

View File

@ -1,206 +0,0 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=20
CONFIG_CGROUPS=y
CONFIG_CPUSETS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_SGI_PARTITION=y
CONFIG_MCKINLEY=y
CONFIG_IA64_PAGE_SIZE_64KB=y
CONFIG_IA64_CYCLONE=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_IA64_PALINFO=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_DOCK=y
CONFIG_ACPI_PROCESSOR=m
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
# CONFIG_PNP_DEBUG_MESSAGES is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_SGI_XP=m
CONFIG_ATA=y
CONFIG_ATA_GENERIC=y
CONFIG_PATA_CMD64X=y
CONFIG_ATA_PIIX=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_QLOGIC_1280=y
CONFIG_SATA_VITESSE=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
CONFIG_FUSION_FC=m
CONFIG_FUSION_SAS=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_NETCONSOLE=y
CONFIG_TIGON3=y
CONFIG_NET_TULIP=y
CONFIG_TULIP=m
CONFIG_E100=m
CONFIG_E1000=y
CONFIG_IGB=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_GAMEPORT=m
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_HW_RANDOM is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_HPET=y
CONFIG_AGP=m
CONFIG_AGP_I460=m
CONFIG_AGP_HP_ZX1=m
CONFIG_DRM=m
CONFIG_DRM_TDFX=m
CONFIG_DRM_R128=m
CONFIG_DRM_RADEON=m
CONFIG_DRM_MGA=m
CONFIG_DRM_SIS=m
CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
CONFIG_SND_MTPAV=m
CONFIG_SND_SERIAL_U16550=m
CONFIG_SND_MPU401=m
CONFIG_SND_CS4281=m
CONFIG_SND_CS46XX=m
CONFIG_SND_EMU10K1=m
CONFIG_SND_FM801=m
CONFIG_HID_GYRATION=m
CONFIG_HID_PANTHERLORD=m
CONFIG_HID_PETALYNX=m
CONFIG_HID_SAMSUNG=m
CONFIG_HID_SONY=m
CONFIG_HID_SUNPLUS=m
CONFIG_USB=m
CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_UHCI_HCD=m
CONFIG_USB_STORAGE=m
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_MTHCA=m
CONFIG_INFINIBAND_IPOIB=m
CONFIG_INTEL_IOMMU=y
CONFIG_MSPEC=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_REISERFS_FS=y
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=y
CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_UDF_FS=m
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V4=m
CONFIG_NFSD=m
CONFIG_NFSD_V4=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_CODEPAGE_775=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_CODEPAGE_852=m
CONFIG_NLS_CODEPAGE_855=m
CONFIG_NLS_CODEPAGE_857=m
CONFIG_NLS_CODEPAGE_860=m
CONFIG_NLS_CODEPAGE_861=m
CONFIG_NLS_CODEPAGE_862=m
CONFIG_NLS_CODEPAGE_863=m
CONFIG_NLS_CODEPAGE_864=m
CONFIG_NLS_CODEPAGE_865=m
CONFIG_NLS_CODEPAGE_866=m
CONFIG_NLS_CODEPAGE_869=m
CONFIG_NLS_CODEPAGE_936=m
CONFIG_NLS_CODEPAGE_950=m
CONFIG_NLS_CODEPAGE_932=m
CONFIG_NLS_CODEPAGE_949=m
CONFIG_NLS_CODEPAGE_874=m
CONFIG_NLS_ISO8859_8=m
CONFIG_NLS_CODEPAGE_1250=m
CONFIG_NLS_CODEPAGE_1251=m
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_2=m
CONFIG_NLS_ISO8859_3=m
CONFIG_NLS_ISO8859_4=m
CONFIG_NLS_ISO8859_5=m
CONFIG_NLS_ISO8859_6=m
CONFIG_NLS_ISO8859_7=m
CONFIG_NLS_ISO8859_9=m
CONFIG_NLS_ISO8859_13=m
CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_T10DIF=y

View File

@ -1,184 +0,0 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=20
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_SGI_PARTITION=y
CONFIG_MCKINLEY=y
CONFIG_IA64_CYCLONE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=512
CONFIG_HOTPLUG_CPU=y
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_IA64_PALINFO=y
CONFIG_BINFMT_MISC=m
CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_HOTPLUG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_ATA=y
CONFIG_ATA_GENERIC=y
CONFIG_PATA_CMD64X=y
CONFIG_ATA_PIIX=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_QLOGIC_1280=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
CONFIG_FUSION_FC=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_NETCONSOLE=y
CONFIG_TIGON3=y
CONFIG_NET_TULIP=y
CONFIG_TULIP=m
CONFIG_E100=m
CONFIG_E1000=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_GAMEPORT=m
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_HW_RANDOM is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_HPET=y
CONFIG_AGP=m
CONFIG_AGP_I460=m
CONFIG_AGP_HP_ZX1=m
CONFIG_DRM=m
CONFIG_DRM_TDFX=m
CONFIG_DRM_R128=m
CONFIG_DRM_RADEON=m
CONFIG_DRM_MGA=m
CONFIG_DRM_SIS=m
CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
CONFIG_SND_MTPAV=m
CONFIG_SND_SERIAL_U16550=m
CONFIG_SND_MPU401=m
CONFIG_SND_CS4281=m
CONFIG_SND_CS46XX=m
CONFIG_SND_EMU10K1=m
CONFIG_SND_FM801=m
CONFIG_USB=m
CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_UHCI_HCD=m
CONFIG_USB_STORAGE=m
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_MTHCA=m
CONFIG_INFINIBAND_IPOIB=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_REISERFS_FS=y
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=y
CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_UDF_FS=m
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V4=m
CONFIG_NFSD=m
CONFIG_NFSD_V4=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_CODEPAGE_775=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_CODEPAGE_852=m
CONFIG_NLS_CODEPAGE_855=m
CONFIG_NLS_CODEPAGE_857=m
CONFIG_NLS_CODEPAGE_860=m
CONFIG_NLS_CODEPAGE_861=m
CONFIG_NLS_CODEPAGE_862=m
CONFIG_NLS_CODEPAGE_863=m
CONFIG_NLS_CODEPAGE_864=m
CONFIG_NLS_CODEPAGE_865=m
CONFIG_NLS_CODEPAGE_866=m
CONFIG_NLS_CODEPAGE_869=m
CONFIG_NLS_CODEPAGE_936=m
CONFIG_NLS_CODEPAGE_950=m
CONFIG_NLS_CODEPAGE_932=m
CONFIG_NLS_CODEPAGE_949=m
CONFIG_NLS_CODEPAGE_874=m
CONFIG_NLS_ISO8859_8=m
CONFIG_NLS_CODEPAGE_1250=m
CONFIG_NLS_CODEPAGE_1251=m
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_2=m
CONFIG_NLS_ISO8859_3=m
CONFIG_NLS_ISO8859_4=m
CONFIG_NLS_ISO8859_5=m
CONFIG_NLS_ISO8859_6=m
CONFIG_NLS_ISO8859_7=m
CONFIG_NLS_ISO8859_9=m
CONFIG_NLS_ISO8859_13=m
CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_CRYPTO_MD5=y

View File

@ -1,169 +0,0 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=20
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_SGI_PARTITION=y
CONFIG_MCKINLEY=y
CONFIG_IA64_PAGE_SIZE_64KB=y
CONFIG_IA64_CYCLONE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=16
CONFIG_HOTPLUG_CPU=y
CONFIG_PERMIT_BSP_REMOVE=y
CONFIG_FORCE_CPEI_RETARGET=y
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_IA64_PALINFO=y
CONFIG_KEXEC=y
CONFIG_BINFMT_MISC=m
CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_HOTPLUG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_ATA=y
CONFIG_ATA_GENERIC=y
CONFIG_PATA_CMD64X=y
CONFIG_ATA_PIIX=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_QLOGIC_1280=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
CONFIG_FUSION_FC=y
CONFIG_FUSION_CTL=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_NETCONSOLE=y
CONFIG_TIGON3=y
CONFIG_NET_TULIP=y
CONFIG_TULIP=m
CONFIG_E100=m
CONFIG_E1000=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_GAMEPORT=m
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_HW_RANDOM is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_HPET=y
CONFIG_AGP=m
CONFIG_AGP_I460=m
CONFIG_DRM=m
CONFIG_DRM_TDFX=m
CONFIG_DRM_R128=m
CONFIG_DRM_RADEON=m
CONFIG_DRM_MGA=m
CONFIG_DRM_SIS=m
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_STORAGE=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_REISERFS_FS=y
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=y
CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_UDF_FS=m
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V4=m
CONFIG_NFSD=m
CONFIG_NFSD_V4=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_CODEPAGE_775=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_CODEPAGE_852=m
CONFIG_NLS_CODEPAGE_855=m
CONFIG_NLS_CODEPAGE_857=m
CONFIG_NLS_CODEPAGE_860=m
CONFIG_NLS_CODEPAGE_861=m
CONFIG_NLS_CODEPAGE_862=m
CONFIG_NLS_CODEPAGE_863=m
CONFIG_NLS_CODEPAGE_864=m
CONFIG_NLS_CODEPAGE_865=m
CONFIG_NLS_CODEPAGE_866=m
CONFIG_NLS_CODEPAGE_869=m
CONFIG_NLS_CODEPAGE_936=m
CONFIG_NLS_CODEPAGE_950=m
CONFIG_NLS_CODEPAGE_932=m
CONFIG_NLS_CODEPAGE_949=m
CONFIG_NLS_CODEPAGE_874=m
CONFIG_NLS_ISO8859_8=m
CONFIG_NLS_CODEPAGE_1250=m
CONFIG_NLS_CODEPAGE_1251=m
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_2=m
CONFIG_NLS_ISO8859_3=m
CONFIG_NLS_ISO8859_4=m
CONFIG_NLS_ISO8859_5=m
CONFIG_NLS_ISO8859_6=m
CONFIG_NLS_ISO8859_7=m
CONFIG_NLS_ISO8859_9=m
CONFIG_NLS_ISO8859_13=m
CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_IA64_GRANULE_16MB=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD5=y

View File

@ -1,148 +0,0 @@
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_MCKINLEY=y
CONFIG_SMP=y
CONFIG_NR_CPUS=16
CONFIG_HOTPLUG_CPU=y
CONFIG_FLATMEM_MANUAL=y
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_IA64_PALINFO=y
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_ATA=y
CONFIG_ATA_GENERIC=y
CONFIG_PATA_CMD64X=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_QLOGIC_1280=y
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
CONFIG_FUSION_FC=y
CONFIG_FUSION_CTL=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_TIGON3=y
CONFIG_NET_TULIP=y
CONFIG_TULIP=y
CONFIG_TULIP_MWI=y
CONFIG_TULIP_MMIO=y
CONFIG_TULIP_NAPI=y
CONFIG_TULIP_NAPI_HW_MITIGATION=y
CONFIG_E100=y
CONFIG_E1000=y
CONFIG_INPUT_JOYDEV=y
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=8
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_HW_RANDOM is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_I2C_CHARDEV=y
CONFIG_AGP=y
CONFIG_AGP_HP_ZX1=y
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
CONFIG_FB_RADEON=y
CONFIG_FB_RADEON_DEBUG=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=y
CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_FM801=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_UDF_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_NFSD=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=y
CONFIG_NLS_CODEPAGE_775=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_CODEPAGE_852=y
CONFIG_NLS_CODEPAGE_855=y
CONFIG_NLS_CODEPAGE_857=y
CONFIG_NLS_CODEPAGE_860=y
CONFIG_NLS_CODEPAGE_861=y
CONFIG_NLS_CODEPAGE_862=y
CONFIG_NLS_CODEPAGE_863=y
CONFIG_NLS_CODEPAGE_864=y
CONFIG_NLS_CODEPAGE_865=y
CONFIG_NLS_CODEPAGE_866=y
CONFIG_NLS_CODEPAGE_869=y
CONFIG_NLS_CODEPAGE_936=y
CONFIG_NLS_CODEPAGE_950=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_NLS_CODEPAGE_949=y
CONFIG_NLS_CODEPAGE_874=y
CONFIG_NLS_ISO8859_8=y
CONFIG_NLS_CODEPAGE_1251=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_2=y
CONFIG_NLS_ISO8859_3=y
CONFIG_NLS_ISO8859_4=y
CONFIG_NLS_ISO8859_5=y
CONFIG_NLS_ISO8859_6=y
CONFIG_NLS_ISO8859_7=y
CONFIG_NLS_ISO8859_9=y
CONFIG_NLS_ISO8859_13=y
CONFIG_NLS_ISO8859_14=y
CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_KOI8_R=y
CONFIG_NLS_KOI8_U=y
CONFIG_NLS_UTF8=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_IA64_PRINT_HAZARDS=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# ia64/platform/hp/common/Makefile
#
# Copyright (C) 2002 Hewlett Packard
# Copyright (C) Alex Williamson (alex_williamson@hp.com)
#
obj-$(CONFIG_IA64_HP_SBA_IOMMU) += sba_iommu.o
obj-$(CONFIG_IA64_HP_AML_NFW) += aml_nfw.o

View File

@ -1,232 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* OpRegion handler to allow AML to call native firmware
*
* (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* This driver implements HP Open Source Review Board proposal 1842,
* which was approved on 9/20/2006.
*
* For technical documentation, see the HP SPPA Firmware EAS, Appendix F.
*
* ACPI does not define a mechanism for AML methods to call native firmware
* interfaces such as PAL or SAL. This OpRegion handler adds such a mechanism.
* After the handler is installed, an AML method can call native firmware by
* storing the arguments and firmware entry point to specific offsets in the
* OpRegion. When AML reads the "return value" offset from the OpRegion, this
* handler loads up the arguments, makes the firmware call, and returns the
* result.
*/
#include <linux/module.h>
#include <linux/acpi.h>
#include <asm/sal.h>
MODULE_AUTHOR("Bjorn Helgaas <bjorn.helgaas@hp.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ACPI opregion handler for native firmware calls");
static bool force_register;
module_param_named(force, force_register, bool, 0);
MODULE_PARM_DESC(force, "Install opregion handler even without HPQ5001 device");
#define AML_NFW_SPACE 0xA1
struct ia64_pdesc {
void *ip;
void *gp;
};
/*
* N.B. The layout of this structure is defined in the HP SPPA FW EAS, and
* the member offsets are embedded in AML methods.
*/
struct ia64_nfw_context {
u64 arg[8];
struct ia64_sal_retval ret;
u64 ip;
u64 gp;
u64 pad[2];
};
static void *virt_map(u64 address)
{
if (address & (1UL << 63))
return (void *) (__IA64_UNCACHED_OFFSET | address);
return __va(address);
}
static void aml_nfw_execute(struct ia64_nfw_context *c)
{
struct ia64_pdesc virt_entry;
ia64_sal_handler entry;
virt_entry.ip = virt_map(c->ip);
virt_entry.gp = virt_map(c->gp);
entry = (ia64_sal_handler) &virt_entry;
IA64_FW_CALL(entry, c->ret,
c->arg[0], c->arg[1], c->arg[2], c->arg[3],
c->arg[4], c->arg[5], c->arg[6], c->arg[7]);
}
static void aml_nfw_read_arg(u8 *offset, u32 bit_width, u64 *value)
{
switch (bit_width) {
case 8:
*value = *(u8 *)offset;
break;
case 16:
*value = *(u16 *)offset;
break;
case 32:
*value = *(u32 *)offset;
break;
case 64:
*value = *(u64 *)offset;
break;
}
}
static void aml_nfw_write_arg(u8 *offset, u32 bit_width, u64 *value)
{
switch (bit_width) {
case 8:
*(u8 *) offset = *value;
break;
case 16:
*(u16 *) offset = *value;
break;
case 32:
*(u32 *) offset = *value;
break;
case 64:
*(u64 *) offset = *value;
break;
}
}
static acpi_status aml_nfw_handler(u32 function, acpi_physical_address address,
u32 bit_width, u64 *value, void *handler_context,
void *region_context)
{
struct ia64_nfw_context *context = handler_context;
u8 *offset = (u8 *) context + address;
if (bit_width != 8 && bit_width != 16 &&
bit_width != 32 && bit_width != 64)
return AE_BAD_PARAMETER;
if (address + (bit_width >> 3) > sizeof(struct ia64_nfw_context))
return AE_BAD_PARAMETER;
switch (function) {
case ACPI_READ:
if (address == offsetof(struct ia64_nfw_context, ret))
aml_nfw_execute(context);
aml_nfw_read_arg(offset, bit_width, value);
break;
case ACPI_WRITE:
aml_nfw_write_arg(offset, bit_width, value);
break;
}
return AE_OK;
}
static struct ia64_nfw_context global_context;
static int global_handler_registered;
static int aml_nfw_add_global_handler(void)
{
acpi_status status;
if (global_handler_registered)
return 0;
status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
AML_NFW_SPACE, aml_nfw_handler, NULL, &global_context);
if (ACPI_FAILURE(status))
return -ENODEV;
global_handler_registered = 1;
printk(KERN_INFO "Global 0x%02X opregion handler registered\n",
AML_NFW_SPACE);
return 0;
}
static int aml_nfw_remove_global_handler(void)
{
acpi_status status;
if (!global_handler_registered)
return 0;
status = acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
AML_NFW_SPACE, aml_nfw_handler);
if (ACPI_FAILURE(status))
return -ENODEV;
global_handler_registered = 0;
printk(KERN_INFO "Global 0x%02X opregion handler removed\n",
AML_NFW_SPACE);
return 0;
}
static int aml_nfw_add(struct acpi_device *device)
{
/*
* We would normally allocate a new context structure and install
* the address space handler for the specific device we found.
* But the HP-UX implementation shares a single global context
* and always puts the handler at the root, so we'll do the same.
*/
return aml_nfw_add_global_handler();
}
static void aml_nfw_remove(struct acpi_device *device)
{
aml_nfw_remove_global_handler();
}
static const struct acpi_device_id aml_nfw_ids[] = {
{"HPQ5001", 0},
{"", 0}
};
static struct acpi_driver acpi_aml_nfw_driver = {
.name = "native firmware",
.ids = aml_nfw_ids,
.ops = {
.add = aml_nfw_add,
.remove = aml_nfw_remove,
},
};
static int __init aml_nfw_init(void)
{
int result;
if (force_register)
aml_nfw_add_global_handler();
result = acpi_bus_register_driver(&acpi_aml_nfw_driver);
if (result < 0) {
aml_nfw_remove_global_handler();
return result;
}
return 0;
}
static void __exit aml_nfw_exit(void)
{
acpi_bus_unregister_driver(&acpi_aml_nfw_driver);
aml_nfw_remove_global_handler();
}
module_init(aml_nfw_init);
module_exit(aml_nfw_exit);

File diff suppressed because it is too large.

View File

@ -1,6 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
generic-y += agp.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += vtime.h

View File

@ -1,49 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* IA64 specific ACPICA environments and implementation
*
* Copyright (C) 2014, Intel Corporation
* Author: Lv Zheng <lv.zheng@intel.com>
*/
#ifndef _ASM_IA64_ACENV_H
#define _ASM_IA64_ACENV_H
#include <asm/intrinsics.h>
#define COMPILER_DEPENDENT_INT64 long
#define COMPILER_DEPENDENT_UINT64 unsigned long
/* Asm macros */
static inline int
ia64_acpi_acquire_global_lock(unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
val = ia64_cmpxchg4_acq(lock, new, old);
} while (unlikely (val != old));
return (new < 3) ? -1 : 0;
}
static inline int
ia64_acpi_release_global_lock(unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = old & ~0x3;
val = ia64_cmpxchg4_acq(lock, new, old);
} while (unlikely (val != old));
return old & 0x1;
}
#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))
#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
#endif /* _ASM_IA64_ACENV_H */

View File

@ -1,17 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
* Alex Williamson <alex.williamson@hp.com>
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* Vendor specific extensions to ACPI.
*/
#ifndef _ASM_IA64_ACPI_EXT_H
#define _ASM_IA64_ACPI_EXT_H
#include <linux/types.h>
extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
#endif /* _ASM_IA64_ACPI_EXT_H */

View File

@ -1,110 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
* Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*/
#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H
#ifdef __KERNEL__
#include <acpi/proc_cap_intel.h>
#include <linux/init.h>
#include <linux/numa.h>
#include <asm/numa.h>
extern int acpi_lapic;
#define acpi_disabled 0 /* ACPI always enabled on IA64 */
#define acpi_noirq 0 /* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
static inline bool acpi_has_cpu_in_madt(void)
{
return !!acpi_lapic;
}
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }
int acpi_request_vector (u32 int_type);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
/* Low-level suspend routine. */
extern int acpi_suspend_lowlevel(void);
static inline unsigned long acpi_get_wakeup_address(void)
{
return 0;
}
/*
* Record the cpei override flag and current logical cpu. This is
* useful for CPU removal.
*/
extern unsigned int can_cpei_retarget(void);
extern unsigned int is_cpu_cpei_target(unsigned int cpu);
extern void set_cpei_target_cpu(unsigned int cpu);
extern unsigned int get_cpei_target_cpu(void);
extern void prefill_possible_map(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
extern int additional_cpus;
#else
#define additional_cpus 0
#endif
#ifdef CONFIG_ACPI_NUMA
#if MAX_NUMNODES > 256
#define MAX_PXM_DOMAINS MAX_NUMNODES
#else
#define MAX_PXM_DOMAINS (256)
#endif
extern int pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif
static inline bool arch_has_acpi_pdc(void) { return true; }
static inline void arch_acpi_set_proc_cap_bits(u32 *cap)
{
*cap |= ACPI_PROC_CAP_EST_CAPABILITY_SMP;
}
#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu) \
for_each_cpu((cpu), &early_cpu_possible_map)
static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
{
int low_cpu, high_cpu;
int cpu;
int next_nid = 0;
low_cpu = cpumask_weight(&early_cpu_possible_map);
high_cpu = max(low_cpu, min_cpus);
high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
for (cpu = low_cpu; cpu < high_cpu; cpu++) {
cpumask_set_cpu(cpu, &early_cpu_possible_map);
if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
node_cpuid[cpu].nid = next_nid;
next_nid++;
if (next_nid >= num_online_nodes())
next_nid = 0;
}
}
}
extern void acpi_numa_fixup(void);
#endif /* CONFIG_ACPI_NUMA */
#endif /*__KERNEL__*/
#endif /*_ASM_ACPI_H*/

View File

@ -1 +0,0 @@
#include <generated/asm-offsets.h>

View File

@ -1,30 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ASM_PROTOTYPES_H
#define _ASM_IA64_ASM_PROTOTYPES_H
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <asm/esi.h>
#include <asm/ftrace.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/string.h>
#include <linux/uaccess.h>
#include <asm/unwind.h>
#include <asm/xor.h>
extern const char ia64_ivt[];
signed int __divsi3(signed int, unsigned int);
signed int __modsi3(signed int, unsigned int);
signed long long __divdi3(signed long long, unsigned long long);
signed long long __moddi3(signed long long, unsigned long long);
unsigned int __udivsi3(unsigned int, unsigned int);
unsigned int __umodsi3(unsigned int, unsigned int);
unsigned long long __udivdi3(unsigned long long, unsigned long long);
unsigned long long __umoddi3(unsigned long long, unsigned long long);
#endif /* _ASM_IA64_ASM_PROTOTYPES_H */

View File

@ -1,136 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ASMMACRO_H
#define _ASM_IA64_ASMMACRO_H
/*
* Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#define ENTRY(name) \
.align 32; \
.proc name; \
name:
#define ENTRY_MIN_ALIGN(name) \
.align 16; \
.proc name; \
name:
#define GLOBAL_ENTRY(name) \
.global name; \
ENTRY(name)
#define END(name) \
.endp name
/*
* Helper macros to make unwind directives more readable:
*/
/* prologue_gr: */
#define ASM_UNW_PRLG_RP 0x8
#define ASM_UNW_PRLG_PFS 0x4
#define ASM_UNW_PRLG_PSP 0x2
#define ASM_UNW_PRLG_PR 0x1
#define ASM_UNW_PRLG_GRSAVE(ninputs) (32+(ninputs))
/*
* Helper macros for accessing user memory.
*
* When adding any new .section/.previous entries here, make sure to
* also add it to the DISCARD section in arch/ia64/kernel/gate.lds.S or
* unpleasant things will happen.
*/
.section "__ex_table", "a" // declare section & section attributes
.previous
# define EX(y,x...) \
.xdata4 "__ex_table", 99f-., y-.; \
[99:] x
# define EXCLR(y,x...) \
.xdata4 "__ex_table", 99f-., y-.+4; \
[99:] x
/*
* Tag MCA recoverable instruction ranges.
*/
.section "__mca_table", "a" // declare section & section attributes
.previous
# define MCA_RECOVER_RANGE(y) \
.xdata4 "__mca_table", y-., 99f-.; \
[99:]
/*
* Mark instructions that need a load of a virtual address patched to be
* a load of a physical address. We use this either in critical performance
* path (ivt.S - TLB miss processing) or in places where it might not be
* safe to use a "tpa" instruction (mca_asm.S - error recovery).
*/
.section ".data..patch.vtop", "a" // declare section & section attributes
.previous
#define LOAD_PHYSICAL(pr, reg, obj) \
[1:](pr)movl reg = obj; \
.xdata4 ".data..patch.vtop", 1b-.
/*
* For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,
* we'll patch out the work-around bundles with NOPs, so their impact is minimal.
*/
#define DO_MCKINLEY_E9_WORKAROUND
#ifdef DO_MCKINLEY_E9_WORKAROUND
.section ".data..patch.mckinley_e9", "a"
.previous
/* workaround for Itanium 2 Errata 9: */
# define FSYS_RETURN \
.xdata4 ".data..patch.mckinley_e9", 1f-.; \
1:{ .mib; \
nop.m 0; \
mov r16=ar.pfs; \
br.call.sptk.many b7=2f;; \
}; \
2:{ .mib; \
nop.m 0; \
mov ar.pfs=r16; \
br.ret.sptk.many b6;; \
}
#else
# define FSYS_RETURN br.ret.sptk.many b6
#endif
/*
* If physical stack register size is different from DEF_NUM_STACK_REG,
* dynamically patch the kernel for correct size.
*/
.section ".data..patch.phys_stack_reg", "a"
.previous
#define LOAD_PHYS_STACK_REG_SIZE(reg) \
[1:] adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0; \
.xdata4 ".data..patch.phys_stack_reg", 1b-.
/*
* Up until early 2004, use of .align within a function caused bad unwind info.
* TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
* otherwise.
*/
#ifdef HAVE_WORKING_TEXT_ALIGN
# define TEXT_ALIGN(n) .align n
#else
# define TEXT_ALIGN(n)
#endif
#ifdef HAVE_SERIALIZE_DIRECTIVE
# define dv_serialize_data .serialize.data
# define dv_serialize_instruction .serialize.instruction
#else
# define dv_serialize_data
# define dv_serialize_instruction
#endif
#endif /* _ASM_IA64_ASMMACRO_H */
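
The __ex_table machinery set up by EX()/EXCLR() above pairs the location of an instruction that may fault on a user access with the location of its fixup code; on a fault the kernel searches this table and, if the faulting IP is found, resumes at the fixup instead of treating the fault as fatal. Below is a rough, self-contained C illustration of that lookup only — the entry layout and addresses are invented for the example, and the real ia64 entries store section-relative offsets rather than absolute addresses.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical entry: address that may fault and where to resume instead. */
struct ex_entry {
	uintptr_t faulting_insn;
	uintptr_t fixup;
};

static const struct ex_entry ex_table[] = {
	{ 0x1000, 0x1ff0 },   /* load from a user pointer -> error path */
	{ 0x1020, 0x1ff8 },   /* store to a user pointer  -> error path */
};

/* What a fault handler conceptually does with the table. */
static uintptr_t search_ex_table(uintptr_t fault_ip)
{
	for (size_t i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
		if (ex_table[i].faulting_insn == fault_ip)
			return ex_table[i].fixup;
	return 0;   /* no fixup: the fault is a real kernel bug */
}

int main(void)
{
	printf("fixup for 0x1020 -> %#lx\n",
	       (unsigned long)search_ex_table(0x1020));
	return 0;
}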

View File

@ -1,216 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc.
*
* NOTE: don't mess with the types below! The "unsigned long" and
* "int" types were carefully placed so as to ensure proper operation
* of the macros.
*
* Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>
#define ATOMIC64_INIT(i) { (i) }
#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic64_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
#define arch_atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op) \
static __inline__ int \
ia64_atomic_##op (int i, atomic_t *v) \
{ \
__s32 old, new; \
CMPXCHG_BUGCHECK_DECL \
\
do { \
CMPXCHG_BUGCHECK(v); \
old = arch_atomic_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return new; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
static __inline__ int \
ia64_atomic_fetch_##op (int i, atomic_t *v) \
{ \
__s32 old, new; \
CMPXCHG_BUGCHECK_DECL \
\
do { \
CMPXCHG_BUGCHECK(v); \
old = arch_atomic_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return old; \
}
#define ATOMIC_OPS(op, c_op) \
ATOMIC_OP(op, c_op) \
ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)
#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i) \
static const int __ia64_atomic_p = __builtin_constant_p(i) ? \
((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 || \
(i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
__ia64_atomic_p
#else
#define __ia64_atomic_const(i) 0
#endif
#define arch_atomic_add_return(i,v) \
({ \
int __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
: ia64_atomic_add(__ia64_aar_i, v); \
})
#define arch_atomic_sub_return(i,v) \
({ \
int __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
: ia64_atomic_sub(__ia64_asr_i, v); \
})
#define arch_atomic_fetch_add(i,v) \
({ \
int __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
: ia64_atomic_fetch_add(__ia64_aar_i, v); \
})
#define arch_atomic_fetch_sub(i,v) \
({ \
int __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
: ia64_atomic_fetch_sub(__ia64_asr_i, v); \
})
ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)
#define arch_atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
#define arch_atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
#define arch_atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)
#define arch_atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v)
#define arch_atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v)
#define arch_atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP
#define ATOMIC64_OP(op, c_op) \
static __inline__ s64 \
ia64_atomic64_##op (s64 i, atomic64_t *v) \
{ \
s64 old, new; \
CMPXCHG_BUGCHECK_DECL \
\
do { \
CMPXCHG_BUGCHECK(v); \
old = arch_atomic64_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return new; \
}
#define ATOMIC64_FETCH_OP(op, c_op) \
static __inline__ s64 \
ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
{ \
s64 old, new; \
CMPXCHG_BUGCHECK_DECL \
\
do { \
CMPXCHG_BUGCHECK(v); \
old = arch_atomic64_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return old; \
}
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
ATOMIC64_FETCH_OP(op, c_op)
ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)
#define arch_atomic64_add_return(i,v) \
({ \
s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
: ia64_atomic64_add(__ia64_aar_i, v); \
})
#define arch_atomic64_sub_return(i,v) \
({ \
s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
: ia64_atomic64_sub(__ia64_asr_i, v); \
})
#define arch_atomic64_fetch_add(i,v) \
({ \
s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
: ia64_atomic64_fetch_add(__ia64_aar_i, v); \
})
#define arch_atomic64_fetch_sub(i,v) \
({ \
s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
: ia64_atomic64_fetch_sub(__ia64_asr_i, v); \
})
ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)
#define arch_atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
#define arch_atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
#define arch_atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)
#define arch_atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
#define arch_atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
#define arch_atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
#define arch_atomic_add(i,v) (void)arch_atomic_add_return((i), (v))
#define arch_atomic_sub(i,v) (void)arch_atomic_sub_return((i), (v))
#define arch_atomic64_add(i,v) (void)arch_atomic64_add_return((i), (v))
#define arch_atomic64_sub(i,v) (void)arch_atomic64_sub_return((i), (v))
#endif /* _ASM_IA64_ATOMIC_H */
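
All of the non-fetchadd atomics above are built from one pattern: read the counter, compute the new value, and retry an acquire cmpxchg until no other CPU has changed the word in between. The following is a portable C11 sketch of the same retry loop using plain compare-exchange, not the ia64 intrinsics or the kernel's atomic_t.

#include <stdatomic.h>
#include <stdio.h>

/* Generic fetch-and-add built from a compare-exchange retry loop,
 * mirroring the ia64_atomic_##op macros above (C11 atomics, not ia64_cmpxchg). */
static int fetch_add_cmpxchg(atomic_int *v, int i)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	/* Retry until no other thread modified *v between the load and the CAS. */
	while (!atomic_compare_exchange_weak_explicit(v, &old, old + i,
						      memory_order_acquire,
						      memory_order_relaxed))
		;	/* 'old' is reloaded by the failed CAS */
	return old;	/* like ia64_atomic_fetch_add(): returns the old value */
}

int main(void)
{
	atomic_int v = 5;
	int old = fetch_add_cmpxchg(&v, 3);

	printf("old=%d new=%d\n", old, atomic_load(&v));
	return 0;
}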

View File

@ -1,79 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Memory barrier definitions. This is based on information published
* in the Processor Abstraction Layer and the System Abstraction Layer
* manual.
*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
*/
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H
#include <linux/compiler.h>
/*
* Macros to force memory ordering. In these descriptions, "previous"
* and "subsequent" refer to program order; "visible" means that all
* architecturally visible effects of a memory access have occurred
* (at a minimum, this means the memory has been read or written).
*
* wmb(): Guarantees that all preceding stores to memory-
* like regions are visible before any subsequent
* stores and that all following stores will be
* visible only after all previous stores.
* rmb(): Like wmb(), but for reads.
* mb(): wmb()/rmb() combo, i.e., all previous memory
* accesses are visible before all subsequent
* accesses and vice versa. This is also known as
* a "fence."
*
* Note: "mb()" and its variants cannot be used as a fence to order
* accesses to memory mapped I/O registers. For that, mf.a needs to
* be used. However, we don't want to always use mf.a because (a)
* it's (presumably) much slower than mf and (b) mf.a is supported for
* sequential memory pages only.
*/
#define mb() ia64_mf()
#define rmb() mb()
#define wmb() mb()
#define dma_rmb() mb()
#define dma_wmb() mb()
# define __smp_mb() mb()
#define __smp_mb__before_atomic() barrier()
#define __smp_mb__after_atomic() barrier()
/*
* IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq, so no
* need for asm trickery!
*/
#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
WRITE_ONCE(*p, v); \
} while (0)
#define __smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
___p1; \
})
/*
* The group barrier in front of the rsm & ssm are necessary to ensure
* that none of the previous instructions in the same group are
* affected by the rsm/ssm.
*/
#include <asm-generic/barrier.h>
#endif /* _ASM_IA64_BARRIER_H */
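
__smp_store_release()/__smp_load_acquire() above rely on the st.rel/ld.acq semantics described in the comment: a release store makes all earlier writes visible before the flag is seen, and an acquire load orders the flag read before any later reads. A minimal C11/pthreads producer-consumer sketch of the same pairing (user-space illustration, not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;         /* ordinary data, published via the flag below */
static atomic_int ready;    /* "message is ready" flag */

static void *producer(void *arg)
{
	(void)arg;
	payload = 42;                                            /* 1: write the data     */
	atomic_store_explicit(&ready, 1, memory_order_release); /* 2: release-store flag */
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		;                                  /* acquire-load: pairs with the release */
	printf("payload = %d\n", payload);         /* guaranteed to see 42 */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}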

View File

@ -1,453 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H
/*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
* O(1) scheduler patch
*/
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*
* The address must be (at least) "long" aligned.
* Note that there are drivers (e.g., eepro100) which use these operations to
* operate on hw-defined data-structures, so we can't easily change these
* operations to force a bigger alignment.
*
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
static __inline__ void
set_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
bit = 1 << (nr & 31);
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old | bit;
} while (cmpxchg_acq(m, old, new) != old);
}
/**
* arch___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
__u32 mask, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
mask = ~(1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old & mask;
} while (cmpxchg_acq(m, old, new) != old);
}
/**
* clear_bit_unlock - Clears a bit in memory with release
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit_unlock() is atomic and may not be reordered. It does
* contain a memory barrier suitable for unlock type operations.
*/
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
__u32 mask, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
mask = ~(1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old & mask;
} while (cmpxchg_rel(m, old, new) != old);
}
/**
* __clear_bit_unlock - Non-atomically clears a bit in memory with release
* @nr: Bit to clear
* @addr: Address to start counting from
*
* Similarly to clear_bit_unlock, the implementation uses a store
* with release semantics. See also arch_spin_unlock().
*/
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
{
__u32 * const m = (__u32 *) addr + (nr >> 5);
__u32 const new = *m & ~(1 << (nr & 31));
ia64_st4_rel_nta(m, new);
}
/**
* arch___clear_bit - Clears a bit in memory (non-atomic version)
* @nr: the bit to clear
* @addr: the address to start counting from
*
* Unlike clear_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to toggle
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static __inline__ void
change_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
bit = (1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old ^ bit;
} while (cmpxchg_acq(m, old, new) != old);
}
/**
* arch___change_bit - Toggle a bit in memory
* @nr: the bit to toggle
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies the acquisition side of the memory barrier.
*/
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
bit = 1 << (nr & 31);
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old | bit;
} while (cmpxchg_acq(m, old, new) != old);
return (old & bit) != 0;
}
/**
* test_and_set_bit_lock - Set a bit and return its old value for lock
* @nr: Bit to set
* @addr: Address to count from
*
* This is the same as test_and_set_bit on ia64
*/
#define test_and_set_bit_lock test_and_set_bit
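
test_and_set_bit_lock() together with clear_bit_unlock() above form the acquire/release pair needed for a bit spinlock: the lock is taken by atomically setting a bit and observing that it was previously clear, and dropped by clearing it with release semantics. The user-space sketch below expresses that pattern with the GCC/Clang __atomic builtins rather than the kernel helpers.

#include <stdint.h>

/* Bit-spinlock sketch: bit 0 of *word is the lock bit.
 * __atomic_fetch_or/__atomic_fetch_and stand in for
 * test_and_set_bit_lock()/clear_bit_unlock(). */
static inline void bit_lock(uint32_t *word)
{
	/* spin until the previous value of bit 0 was 0, i.e. we took the lock */
	while (__atomic_fetch_or(word, 1u, __ATOMIC_ACQUIRE) & 1u)
		;
}

static inline void bit_unlock(uint32_t *word)
{
	/* clear bit 0 with release semantics, like clear_bit_unlock() */
	__atomic_fetch_and(word, ~1u, __ATOMIC_RELEASE);
}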
/**
* arch___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
__u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
int oldbitset = (*p & m) != 0;
*p |= m;
return oldbitset;
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies the acquisition side of the memory barrier.
*/
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
__u32 mask, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
mask = ~(1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old & mask;
} while (cmpxchg_acq(m, old, new) != old);
return (old & ~mask) != 0;
}
/**
* arch___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
__u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
int oldbitset = (*p & m) != 0;
*p &= ~m;
return oldbitset;
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies the acquisition side of the memory barrier.
*/
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
bit = (1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old ^ bit;
} while (cmpxchg_acq(m, old, new) != old);
return (old & bit) != 0;
}
/**
* arch___test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
*/
static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
__u32 old, bit = (1 << (nr & 31));
__u32 *m = (__u32 *) addr + (nr >> 5);
old = *m;
*m = old ^ bit;
return (old & bit) != 0;
}
#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire
/**
* ffz - find the first zero bit in a long word
* @x: The long word to find the bit in
*
* Returns the bit-number (0..63) of the first (least significant) zero bit.
* Undefined if no zero exists, so code should check against ~0UL first...
*/
static inline unsigned long
ffz (unsigned long x)
{
unsigned long result;
result = ia64_popcnt(x & (~x - 1));
return result;
}
/**
* __ffs - find first bit in word.
* @x: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static __inline__ unsigned long
__ffs (unsigned long x)
{
unsigned long result;
result = ia64_popcnt((x-1) & ~x);
return result;
}
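
ffz() and __ffs() above both reduce bit searching to a single popcount: x & (~x - 1) keeps exactly the run of 1 bits below the lowest zero bit, and (x - 1) & ~x keeps exactly the 0 bits below the lowest set bit, so the popcount of the mask is the bit index. A small user-space check of those identities, with __builtin_popcountl standing in for ia64_popcnt:

#include <stdio.h>

static unsigned long my_ffz(unsigned long x)  /* index of the lowest 0 bit */
{
	return __builtin_popcountl(x & (~x - 1));
}

static unsigned long my_ffs(unsigned long x)  /* index of the lowest 1 bit */
{
	return __builtin_popcountl((x - 1) & ~x);
}

int main(void)
{
	/* 0b10111: lowest zero is bit 3; 0b10100: lowest one is bit 2 */
	printf("ffz(0x17) = %lu\n", my_ffz(0x17));   /* prints 3 */
	printf("ffs(0x14) = %lu\n", my_ffs(0x14));   /* prints 2 */
	return 0;
}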
#ifdef __KERNEL__
/*
* Return bit number of last (most-significant) bit set. Undefined
* for x==0. Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
*/
static inline unsigned long
ia64_fls (unsigned long x)
{
long double d = x;
long exp;
exp = ia64_getf_exp(d);
return exp - 0xffff;
}
/*
* Find the last (most significant) bit set. Returns 0 for x==0 and
* bits are numbered from 1..32 (e.g., fls(9) == 4).
*/
static inline int fls(unsigned int t)
{
unsigned long x = t & 0xffffffffu;
if (!x)
return 0;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ia64_popcnt(x);
}
/*
* Find the last (most significant) bit set. Undefined for x==0.
* Bits are numbered from 0..63 (e.g., __fls(9) == 3).
*/
static inline unsigned long
__fls (unsigned long x)
{
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x |= x >> 32;
return ia64_popcnt(x) - 1;
}
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/builtin-ffs.h>
/*
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
unsigned long result;
result = ia64_popcnt(x);
return result;
}
#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
#define __arch_hweight8(x) ((unsigned int) __arch_hweight64((x) & 0xfful))
#include <asm-generic/bitops/const_hweight.h>
#endif /* __KERNEL__ */
#ifdef __KERNEL__
#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#include <asm-generic/bitops/sched.h>
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_BITOPS_H */
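
fls() and __fls() above avoid a loop by smearing the most significant set bit downward (so every bit at or below it becomes 1) and then counting bits, while ia64_fls() instead converts x to long double, whose 64-bit significand makes the conversion exact, and reads the exponent back. A portable sketch of the smear-and-popcount variant, again with __builtin_popcountll in place of ia64_popcnt:

#include <stdio.h>

static int my_fls64(unsigned long long x)   /* 1-based like fls(); 0 for x == 0 */
{
	x |= x >> 1;    /* after these shifts every bit at or below   */
	x |= x >> 2;    /* the most significant set bit is 1 ...      */
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;
	return __builtin_popcountll(x);  /* ... so the popcount is the bit count */
}

int main(void)
{
	printf("fls(9) = %d\n", my_fls64(9));                   /* prints 4  */
	printf("fls(1<<63) = %d\n", my_fls64(1ULL << 63));      /* prints 64 */
	return 0;
}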

Some files were not shown because too many files have changed in this diff.