
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 update from Martin Schwidefsky:
 "Add support to generate code for the latest machine zEC12, MOD and XOR
  instruction support for the BPF jit compiler, the dasd safe offline
  feature and the big one: the s390 architecture gets PCI support!!
  Right before the world ends on the 21st ;-)"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (41 commits)
  s390/qdio: rename the misleading PCI flag of qdio devices
  s390/pci: remove obsolete email addresses
  s390/pci: speed up __iowrite64_copy by using pci store block insn
  s390/pci: enable NEED_DMA_MAP_STATE
  s390/pci: no msleep in potential IRQ context
  s390/pci: fix potential NULL pointer dereference in dma_free_seg_table()
  s390/pci: use kmem_cache_zalloc instead of kmem_cache_alloc/memset
  s390/bpf,jit: add support for XOR instruction
  s390/bpf,jit: add support MOD instruction
  s390/cio: fix pgid reserved check
  vga: compile fix, disable vga for s390
  s390/pci: add PCI Kconfig options
  s390/pci: s390 specific PCI sysfs attributes
  s390/pci: PCI hotplug support via SCLP
  s390/pci: CHSC PCI support for error and availability events
  s390/pci: DMA support
  s390/pci: PCI adapter interrupts for MSI/MSI-X
  s390/bitops: find leftmost bit instruction support
  s390/pci: CLP interface
  s390/pci: base support
  ...
commit c7708fac5a
Linus Torvalds, 2012-12-13 14:20:19 -08:00
80 changed files with 5331 additions and 792 deletions


@ -6,3 +6,4 @@ obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
obj-$(CONFIG_APPLDATA_BASE) += appldata/
obj-$(CONFIG_MATHEMU) += math-emu/
obj-y += net/
obj-$(CONFIG_PCI) += pci/


@ -34,12 +34,6 @@ config GENERIC_BUG
config GENERIC_BUG_RELATIVE_POINTERS
def_bool y
config NO_IOMEM
def_bool y
config NO_DMA
def_bool y
config ARCH_DMA_ADDR_T_64BIT
def_bool 64BIT
@ -58,6 +52,12 @@ config KEXEC
config AUDIT_ARCH
def_bool y
config NO_IOPORT
def_bool y
config PCI_QUIRKS
def_bool n
config S390
def_bool y
select USE_GENERIC_SMP_HELPERS if SMP
@ -171,6 +171,10 @@ config HAVE_MARCH_Z196_FEATURES
def_bool n
select HAVE_MARCH_Z10_FEATURES
config HAVE_MARCH_ZEC12_FEATURES
def_bool n
select HAVE_MARCH_Z196_FEATURES
choice
prompt "Processor type"
default MARCH_G5
@ -222,6 +226,13 @@ config MARCH_Z196
(2818 and 2817 series). The kernel will be slightly faster but will
not work on older machines.
config MARCH_ZEC12
bool "IBM zEC12"
select HAVE_MARCH_ZEC12_FEATURES if 64BIT
help
Select this to enable optimizations for IBM zEC12 (2827 series). The
kernel will be slightly faster but will not work on older machines.
endchoice
config 64BIT
@ -426,6 +437,53 @@ config QDIO
If unsure, say Y.
menuconfig PCI
bool "PCI support"
default n
depends on 64BIT
select ARCH_SUPPORTS_MSI
select PCI_MSI
help
Enable PCI support.
if PCI
config PCI_NR_FUNCTIONS
int "Maximum number of PCI functions (1-4096)"
range 1 4096
default "64"
help
This allows you to specify the maximum number of PCI functions which
this kernel will support.
source "drivers/pci/Kconfig"
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/hotplug/Kconfig"
endif # PCI
config PCI_DOMAINS
def_bool PCI
config HAS_IOMEM
def_bool PCI
config IOMMU_HELPER
def_bool PCI
config HAS_DMA
def_bool PCI
select HAVE_DMA_API_DEBUG
config NEED_SG_DMA_LENGTH
def_bool PCI
config HAVE_DMA_ATTRS
def_bool PCI
config NEED_DMA_MAP_STATE
def_bool PCI
config CHSC_SCH
def_tristate m
prompt "Support for CHSC subchannels"


@ -41,6 +41,7 @@ cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
cflags-$(CONFIG_MARCH_Z196) += $(call cc-option,-march=z196)
cflags-$(CONFIG_MARCH_ZEC12) += $(call cc-option,-march=zEC12)
#KBUILD_IMAGE is necessary for make rpm
KBUILD_IMAGE :=arch/s390/boot/image


@ -325,7 +325,8 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n);
BUG_ON((ret < 0) || (ret != n));
if (ret < 0 || ret != n)
return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@ -457,7 +458,8 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, param, out, in, n);
BUG_ON((ret < 0) || (ret != n));
if (ret < 0 || ret != n)
return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@ -625,7 +627,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
param = xts_ctx->pcc.key + offset;
ret = crypt_s390_pcc(func, param);
BUG_ON(ret < 0);
if (ret < 0)
return -EIO;
memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
param = xts_ctx->key + offset;
@ -636,7 +639,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n);
BUG_ON(ret < 0 || ret != n);
if (ret < 0 || ret != n)
return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@ -769,7 +773,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
}
ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
BUG_ON(ret < 0 || ret != n);
if (ret < 0 || ret != n)
return -EIO;
if (n > AES_BLOCK_SIZE)
memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
@ -788,7 +793,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, sctx->key, buf, in,
AES_BLOCK_SIZE, ctrblk);
BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE);
if (ret < 0 || ret != AES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes);
crypto_inc(ctrblk, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);


@ -94,7 +94,8 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, key, out, in, n);
BUG_ON((ret < 0) || (ret != n));
if (ret < 0 || ret != n)
return -EIO;
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@ -120,7 +121,8 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, iv, out, in, n);
BUG_ON((ret < 0) || (ret != n));
if (ret < 0 || ret != n)
return -EIO;
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@ -386,7 +388,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
}
ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
BUG_ON((ret < 0) || (ret != n));
if (ret < 0 || ret != n)
return -EIO;
if (n > DES_BLOCK_SIZE)
memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
DES_BLOCK_SIZE);
@ -404,7 +407,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, ctx->key, buf, in,
DES_BLOCK_SIZE, ctrblk);
BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE);
if (ret < 0 || ret != DES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes);
crypto_inc(ctrblk, DES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);


@ -72,14 +72,16 @@ static int ghash_update(struct shash_desc *desc,
if (!dctx->bytes) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
GHASH_BLOCK_SIZE);
BUG_ON(ret != GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;
}
}
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
BUG_ON(ret != n);
if (ret != n)
return -EIO;
src += n;
srclen -= n;
}
@ -92,7 +94,7 @@ static int ghash_update(struct shash_desc *desc,
return 0;
}
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
u8 *buf = dctx->buffer;
int ret;
@ -103,21 +105,24 @@ static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
memset(pos, 0, dctx->bytes);
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
BUG_ON(ret != GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;
}
dctx->bytes = 0;
return 0;
}
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
int ret;
ghash_flush(ctx, dctx);
memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
return 0;
ret = ghash_flush(ctx, dctx);
if (!ret)
memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
return ret;
}
static struct shash_alg ghash_alg = {


@ -36,7 +36,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
BUG_ON(ret != bsize);
if (ret != bsize)
return -EIO;
data += bsize - index;
len -= bsize - index;
index = 0;
@ -46,7 +47,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
if (len >= bsize) {
ret = crypt_s390_kimd(ctx->func, ctx->state, data,
len & ~(bsize - 1));
BUG_ON(ret != (len & ~(bsize - 1)));
if (ret != (len & ~(bsize - 1)))
return -EIO;
data += ret;
len -= ret;
}
@ -88,7 +90,8 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
BUG_ON(ret != end);
if (ret != end)
return -EIO;
/* copy digest to out */
memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));


@ -640,6 +640,87 @@ static inline unsigned long find_first_bit(const unsigned long * addr,
}
#define find_first_bit find_first_bit
/*
* Big endian variant which starts bit counting from left using
* the flogr (find leftmost one) instruction.
*/
static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
{
register unsigned long bit asm("2") = val;
register unsigned long out asm("3");
asm volatile (
" .insn rre,0xb9830000,%[bit],%[bit]\n"
: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
return nr + bit;
}
/*
* 64 bit special left bitops format:
* order in memory:
* 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
* 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
* 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
* 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
* after that follows the next long with bit numbers
* 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
* 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
* 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
* 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
* The reason for this bit ordering is the fact that
* the hardware sets bits in a bitmap starting at bit 0
* and we don't want to scan the bitmap from the 'wrong
* end'.
*/
static inline unsigned long find_first_bit_left(const unsigned long *addr,
unsigned long size)
{
unsigned long bytes, bits;
if (!size)
return 0;
bytes = __ffs_word_loop(addr, size);
bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
return (bits < size) ? bits : size;
}
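A hedged illustration of the MSB-first numbering implemented above (the bitmap value is made up):

	/* find_first_bit_left(map, 64) with map[0] = 0x0000800000000000UL
	 * returns 16: bit 0 is the leftmost bit of the first word, and
	 * the single one sits 16 positions from the left.
	 */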
static inline int find_next_bit_left(const unsigned long *addr,
unsigned long size,
unsigned long offset)
{
const unsigned long *p;
unsigned long bit, set;
if (offset >= size)
return size;
bit = offset & (__BITOPS_WORDSIZE - 1);
offset -= bit;
size -= offset;
p = addr + offset / __BITOPS_WORDSIZE;
if (bit) {
set = __flo_word(0, *p & (~0UL << bit));
if (set >= size)
return size + offset;
if (set < __BITOPS_WORDSIZE)
return set + offset;
offset += __BITOPS_WORDSIZE;
size -= __BITOPS_WORDSIZE;
p++;
}
return offset + find_first_bit_left(p, size);
}
#define for_each_set_bit_left(bit, addr, size) \
for ((bit) = find_first_bit_left((addr), (size)); \
(bit) < (size); \
(bit) = find_next_bit_left((addr), (size), (bit) + 1))
/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_left_cont(bit, addr, size) \
for ((bit) = find_next_bit_left((addr), (size), (bit)); \
(bit) < (size); \
(bit) = find_next_bit_left((addr), (size), (bit) + 1))
/**
* find_next_zero_bit - find the first zero bit in a memory region
* @addr: The address to base the search on


@ -18,6 +18,9 @@ struct irb;
struct ccw1;
struct ccw_dev_id;
/* from asm/schid.h */
struct subchannel_id;
/* simplified initializers for struct ccw_device:
* CCW_DEVICE and CCW_DEVICE_DEVTYPE initialize one
* entry in your MODULE_DEVICE_TABLE and set the match_flag correctly */
@ -223,8 +226,7 @@ extern int ccw_device_force_console(void);
int ccw_device_siosl(struct ccw_device *);
// FIXME: these have to go
extern int _ccw_device_get_subchannel_number(struct ccw_device *);
extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
extern void *ccw_device_get_chp_desc(struct ccw_device *, int);
#endif /* _S390_CCWDEV_H_ */


@ -59,6 +59,9 @@ extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver);
int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
int num_devices, const char *buf);
extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);


@ -0,0 +1,28 @@
#ifndef _ASM_S390_CLP_H
#define _ASM_S390_CLP_H
/* CLP common request & response block size */
#define CLP_BLK_SIZE (PAGE_SIZE * 2)
struct clp_req_hdr {
u16 len;
u16 cmd;
} __packed;
struct clp_rsp_hdr {
u16 len;
u16 rsp;
} __packed;
/* CLP Response Codes */
#define CLP_RC_OK 0x0010 /* Command request successfully */
#define CLP_RC_CMD 0x0020 /* Command code not recognized */
#define CLP_RC_PERM 0x0030 /* Command not authorized */
#define CLP_RC_FMT 0x0040 /* Invalid command request format */
#define CLP_RC_LEN 0x0050 /* Invalid command request length */
#define CLP_RC_8K 0x0060 /* Command requires 8K LPCB */
#define CLP_RC_RESNOT0 0x0070 /* Reserved field not zero */
#define CLP_RC_NODATA 0x0080 /* No data available */
#define CLP_RC_FC_UNKNOWN 0x0100 /* Function code not recognized */
#endif


@ -0,0 +1,76 @@
#ifndef _ASM_S390_DMA_MAPPING_H
#define _ASM_S390_DMA_MAPPING_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
#define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
extern struct dma_map_ops s390_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &s390_dma_ops;
}
extern int dma_set_mask(struct device *dev, u64 mask);
extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#include <asm-generic/dma-mapping-common.h>
static inline int dma_supported(struct device *dev, u64 mask)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
if (dma_ops->dma_supported == NULL)
return 1;
return dma_ops->dma_supported(dev, mask);
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
return 0;
return addr + size - 1 <= *dev->dma_mask;
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
if (dma_ops->mapping_error)
return dma_ops->mapping_error(dev, dma_addr);
return (dma_addr == 0UL);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *ret;
ret = ops->alloc(dev, size, dma_handle, flag, NULL);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
return ret;
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
#endif /* _ASM_S390_DMA_MAPPING_H */


@ -1,14 +1,13 @@
#ifndef _ASM_S390_DMA_H
#define _ASM_S390_DMA_H
#include <asm/io.h>
/*
* S390 version
* MAX_DMA_ADDRESS is ambiguous because on s390 it's completely unrelated
* to DMA. It _is_ used for the s390 memory zone split at 2GB caused
* by the 31 bit heritage.
*/
#ifndef _ASM_DMA_H
#define _ASM_DMA_H
#include <asm/io.h> /* need byte IO */
#define MAX_DMA_ADDRESS 0x80000000
#define free_dma(x) do { } while (0)
#endif /* _ASM_DMA_H */
#endif /* _ASM_S390_DMA_H */


@ -0,0 +1,22 @@
#ifndef _HW_IRQ_H
#define _HW_IRQ_H
#include <linux/msi.h>
#include <linux/pci.h>
static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
return __irq_get_msi_desc(irq);
}
/* Must be called with msi map lock held */
static inline int irq_set_msi_desc(unsigned int irq, struct msi_desc *msi)
{
if (!msi)
return -EINVAL;
msi->irq = irq;
return 0;
}
#endif


@ -9,9 +9,9 @@
#ifndef _S390_IO_H
#define _S390_IO_H
#include <linux/kernel.h>
#include <asm/page.h>
#define IO_SPACE_LIMIT 0xffffffff
#include <asm/pci_io.h>
/*
* Change virtual addresses to physical addresses and vv.
@ -24,10 +24,11 @@ static inline unsigned long virt_to_phys(volatile void * address)
" lra %0,0(%1)\n"
" jz 0f\n"
" la %0,0\n"
"0:"
"0:"
: "=a" (real_address) : "a" (address) : "cc");
return real_address;
return real_address;
}
#define virt_to_phys virt_to_phys
static inline void * phys_to_virt(unsigned long address)
{
@ -42,4 +43,50 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
*/
#define xlate_dev_kmem_ptr(p) p
#define IO_SPACE_LIMIT 0
#ifdef CONFIG_PCI
#define ioremap_nocache(addr, size) ioremap(addr, size)
#define ioremap_wc ioremap_nocache
/* TODO: s390 cannot support io_remap_pfn_range... */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
return (void __iomem *) offset;
}
static inline void iounmap(volatile void __iomem *addr)
{
}
/*
* s390 needs a private implementation of pci_iomap since ioremap with its
* offset parameter isn't sufficient. That's because BAR spaces are not
* disjunctive on s390 so we need the bar parameter of pci_iomap to find
* the corresponding device and create the mapping cookie.
*/
#define pci_iomap pci_iomap
#define pci_iounmap pci_iounmap
#define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count)
#define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count)
#define memset_io(dst, val, count) zpci_memset_io(dst, val, count)
#define __raw_readb zpci_read_u8
#define __raw_readw zpci_read_u16
#define __raw_readl zpci_read_u32
#define __raw_readq zpci_read_u64
#define __raw_writeb zpci_write_u8
#define __raw_writew zpci_write_u16
#define __raw_writel zpci_write_u32
#define __raw_writeq zpci_write_u64
#endif /* CONFIG_PCI */
#include <asm-generic/io.h>
#endif
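A hedged sketch of what this means on the driver side (the fragment below is hypothetical; pdev stands for an already-probed struct pci_dev): the token returned by pci_iomap() is an encoded cookie rather than a CPU address, so it may only be dereferenced through the zpci accessors above.

	void __iomem *base = pci_iomap(pdev, 0, 0);	/* BAR 0, whole BAR */
	u32 val = readl(base + 0x10);	/* resolves to zpci_read_u32() */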


@ -33,6 +33,8 @@ enum interruption_class {
IOINT_APB,
IOINT_ADM,
IOINT_CSC,
IOINT_PCI,
IOINT_MSI,
NMI_NMI,
NR_IRQS,
};
@ -51,4 +53,14 @@ void service_subclass_irq_unregister(void);
void measurement_alert_subclass_register(void);
void measurement_alert_subclass_unregister(void);
#ifdef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
disable_irq_nosync(irq)
# define disable_irq_lockdep(irq) disable_irq(irq)
# define enable_irq_lockdep(irq) enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
enable_irq(irq)
#endif
#endif /* _ASM_IRQ_H */


@ -18,6 +18,7 @@
#define CHSC_SCH_ISC 7 /* CHSC subchannels */
/* Adapter interrupts. */
#define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */
#define PCI_ISC 2 /* PCI I/O subchannels */
#define AP_ISC 6 /* adjunct processor (crypto) devices */
/* Functions for registration of I/O interruption subclasses */


@ -30,6 +30,8 @@
#include <asm/setup.h>
#ifndef __ASSEMBLY__
void storage_key_init_range(unsigned long start, unsigned long end);
static unsigned long pfmf(unsigned long function, unsigned long address)
{
asm volatile(


@ -1,10 +1,158 @@
#ifndef __ASM_S390_PCI_H
#define __ASM_S390_PCI_H
/* S/390 systems don't have a PCI bus. This file is just here because some stupid .c code
* includes it even if CONFIG_PCI is not set.
*/
/* must be set before including asm-generic/pci.h */
#define PCI_DMA_BUS_IS_PHYS (0)
/* must be set before including pci_clp.h */
#define PCI_BAR_COUNT 6
#endif /* __ASM_S390_PCI_H */
#include <asm-generic/pci.h>
#include <asm-generic/pci-dma-compat.h>
#include <asm/pci_clp.h>
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
#define pcibios_assign_all_busses() (0)
void __iomem *pci_iomap(struct pci_dev *, int, unsigned long);
void pci_iounmap(struct pci_dev *, void __iomem *);
int pci_domain_nr(struct pci_bus *);
int pci_proc_domain(struct pci_bus *);
/* MSI arch hooks */
#define arch_setup_msi_irqs arch_setup_msi_irqs
#define arch_teardown_msi_irqs arch_teardown_msi_irqs
#define ZPCI_BUS_NR 0 /* default bus number */
#define ZPCI_DEVFN 0 /* default device number */
/* PCI Function Controls */
#define ZPCI_FC_FN_ENABLED 0x80
#define ZPCI_FC_ERROR 0x40
#define ZPCI_FC_BLOCKED 0x20
#define ZPCI_FC_DMA_ENABLED 0x10
struct msi_map {
unsigned long irq;
struct msi_desc *msi;
struct hlist_node msi_chain;
};
#define ZPCI_NR_MSI_VECS 64
#define ZPCI_MSI_MASK (ZPCI_NR_MSI_VECS - 1)
enum zpci_state {
ZPCI_FN_STATE_RESERVED,
ZPCI_FN_STATE_STANDBY,
ZPCI_FN_STATE_CONFIGURED,
ZPCI_FN_STATE_ONLINE,
NR_ZPCI_FN_STATES,
};
struct zpci_bar_struct {
u32 val; /* bar start & 3 flag bits */
u8 size; /* order 2 exponent */
u16 map_idx; /* index into bar mapping array */
};
/* Private data per function */
struct zpci_dev {
struct pci_dev *pdev;
struct pci_bus *bus;
struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. */
enum zpci_state state;
u32 fid; /* function ID, used by sclp */
u32 fh; /* function handle, used by insn's */
u16 pchid; /* physical channel ID */
u8 pfgid; /* function group ID */
u16 domain;
/* IRQ stuff */
u64 msi_addr; /* MSI address */
struct zdev_irq_map *irq_map;
struct msi_map *msi_map[ZPCI_NR_MSI_VECS];
unsigned int aisb; /* number of the summary bit */
/* DMA stuff */
unsigned long *dma_table;
spinlock_t dma_table_lock;
int tlb_refresh;
spinlock_t iommu_bitmap_lock;
unsigned long *iommu_bitmap;
unsigned long iommu_size;
unsigned long iommu_pages;
unsigned int next_bit;
struct zpci_bar_struct bars[PCI_BAR_COUNT];
u64 start_dma; /* Start of available DMA addresses */
u64 end_dma; /* End of available DMA addresses */
u64 dma_mask; /* DMA address space mask */
enum pci_bus_speed max_bus_speed;
};
struct pci_hp_callback_ops {
int (*create_slot) (struct zpci_dev *zdev);
void (*remove_slot) (struct zpci_dev *zdev);
};
static inline bool zdev_enabled(struct zpci_dev *zdev)
{
return (zdev->fh & (1UL << 31)) ? true : false;
}
/* -----------------------------------------------------------------------------
Prototypes
----------------------------------------------------------------------------- */
/* Base stuff */
struct zpci_dev *zpci_alloc_device(void);
int zpci_create_device(struct zpci_dev *);
int zpci_enable_device(struct zpci_dev *);
void zpci_stop_device(struct zpci_dev *);
void zpci_free_device(struct zpci_dev *);
int zpci_scan_device(struct zpci_dev *);
int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
int zpci_unregister_ioat(struct zpci_dev *, u8);
/* CLP */
int clp_find_pci_devices(void);
int clp_add_pci_device(u32, u32, int);
int clp_enable_fh(struct zpci_dev *, u8);
int clp_disable_fh(struct zpci_dev *);
/* MSI */
struct msi_desc *__irq_get_msi_desc(unsigned int);
int zpci_msi_set_mask_bits(struct msi_desc *, u32, u32);
int zpci_setup_msi_irq(struct zpci_dev *, struct msi_desc *, unsigned int, int);
void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *);
int zpci_msihash_init(void);
void zpci_msihash_exit(void);
/* Error handling and recovery */
void zpci_event_error(void *);
void zpci_event_availability(void *);
/* Helpers */
struct zpci_dev *get_zdev(struct pci_dev *);
struct zpci_dev *get_zdev_by_fid(u32);
bool zpci_fid_present(u32);
/* sysfs */
int zpci_sysfs_add_device(struct device *);
void zpci_sysfs_remove_device(struct device *);
/* DMA */
int zpci_dma_init(void);
void zpci_dma_exit(void);
/* Hotplug */
extern struct mutex zpci_list_lock;
extern struct list_head zpci_list;
extern struct pci_hp_callback_ops hotplug_ops;
extern unsigned int pci_probe;
#endif


@ -0,0 +1,182 @@
#ifndef _ASM_S390_PCI_CLP_H
#define _ASM_S390_PCI_CLP_H
#include <asm/clp.h>
/*
* Call Logical Processor - Command Codes
*/
#define CLP_LIST_PCI 0x0002
#define CLP_QUERY_PCI_FN 0x0003
#define CLP_QUERY_PCI_FNGRP 0x0004
#define CLP_SET_PCI_FN 0x0005
/* PCI function handle list entry */
struct clp_fh_list_entry {
u16 device_id;
u16 vendor_id;
u32 config_state : 1;
u32 : 31;
u32 fid; /* PCI function id */
u32 fh; /* PCI function handle */
} __packed;
#define CLP_RC_SETPCIFN_FH 0x0101 /* Invalid PCI fn handle */
#define CLP_RC_SETPCIFN_FHOP 0x0102 /* Fn handle not valid for op */
#define CLP_RC_SETPCIFN_DMAAS 0x0103 /* Invalid DMA addr space */
#define CLP_RC_SETPCIFN_RES 0x0104 /* Insufficient resources */
#define CLP_RC_SETPCIFN_ALRDY 0x0105 /* Fn already in requested state */
#define CLP_RC_SETPCIFN_ERR 0x0106 /* Fn in permanent error state */
#define CLP_RC_SETPCIFN_RECPND 0x0107 /* Error recovery pending */
#define CLP_RC_SETPCIFN_BUSY 0x0108 /* Fn busy */
#define CLP_RC_LISTPCI_BADRT 0x010a /* Resume token not recognized */
#define CLP_RC_QUERYPCIFG_PFGID 0x010b /* Unrecognized PFGID */
/* request or response block header length */
#define LIST_PCI_HDR_LEN 32
/* Number of function handles fitting in response block */
#define CLP_FH_LIST_NR_ENTRIES \
((CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) \
/ sizeof(struct clp_fh_list_entry))
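Assuming a 4 KB PAGE_SIZE (so CLP_BLK_SIZE is 8192 bytes) and the 16-byte clp_fh_list_entry defined above, the arithmetic works out to:

	/* (8192 - 2 * 32) / 16 = 508 function handles per list response */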
#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
#define CLP_UTIL_STR_LEN 64
/* List PCI functions request */
struct clp_req_list_pci {
struct clp_req_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u64 resume_token;
u64 reserved2;
} __packed;
/* List PCI functions response */
struct clp_rsp_list_pci {
struct clp_rsp_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u64 resume_token;
u32 reserved2;
u16 max_fn;
u8 reserved3;
u8 entry_size;
struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
} __packed;
/* Query PCI function request */
struct clp_req_query_pci {
struct clp_req_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u32 fh; /* function handle */
u32 reserved2;
u64 reserved3;
} __packed;
/* Query PCI function response */
struct clp_rsp_query_pci {
struct clp_rsp_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u16 vfn; /* virtual fn number */
u16 : 7;
u16 util_str_avail : 1; /* utility string available? */
u16 pfgid : 8; /* pci function group id */
u32 fid; /* pci function id */
u8 bar_size[PCI_BAR_COUNT];
u16 pchid;
u32 bar[PCI_BAR_COUNT];
u64 reserved2;
u64 sdma; /* start dma as */
u64 edma; /* end dma as */
u64 reserved3[6];
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
} __packed;
/* Query PCI function group request */
struct clp_req_query_pci_grp {
struct clp_req_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u32 : 24;
u32 pfgid : 8; /* function group id */
u32 reserved2;
u64 reserved3;
} __packed;
/* Query PCI function group response */
struct clp_rsp_query_pci_grp {
struct clp_rsp_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u16 : 4;
u16 noi : 12; /* number of interrupts */
u8 version;
u8 : 6;
u8 frame : 1;
u8 refresh : 1; /* TLB refresh mode */
u16 reserved2;
u16 mui;
u64 reserved3;
u64 dasm; /* dma address space mask */
u64 msia; /* MSI address */
u64 reserved4;
u64 reserved5;
} __packed;
/* Set PCI function request */
struct clp_req_set_pci {
struct clp_req_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u32 fh; /* function handle */
u16 reserved2;
u8 oc; /* operation controls */
u8 ndas; /* number of dma spaces */
u64 reserved3;
} __packed;
/* Set PCI function response */
struct clp_rsp_set_pci {
struct clp_rsp_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u32 fh; /* function handle */
u32 reserved3;
u64 reserved4;
} __packed;
/* Combined request/response block structures used by clp insn */
struct clp_req_rsp_list_pci {
struct clp_req_list_pci request;
struct clp_rsp_list_pci response;
} __packed;
struct clp_req_rsp_set_pci {
struct clp_req_set_pci request;
struct clp_rsp_set_pci response;
} __packed;
struct clp_req_rsp_query_pci {
struct clp_req_query_pci request;
struct clp_rsp_query_pci response;
} __packed;
struct clp_req_rsp_query_pci_grp {
struct clp_req_query_pci_grp request;
struct clp_rsp_query_pci_grp response;
} __packed;
#endif


@ -0,0 +1,196 @@
#ifndef _ASM_S390_PCI_DMA_H
#define _ASM_S390_PCI_DMA_H
/* I/O Translation Anchor (IOTA) */
enum zpci_ioat_dtype {
ZPCI_IOTA_STO = 0,
ZPCI_IOTA_RTTO = 1,
ZPCI_IOTA_RSTO = 2,
ZPCI_IOTA_RFTO = 3,
ZPCI_IOTA_PFAA = 4,
ZPCI_IOTA_IOPFAA = 5,
ZPCI_IOTA_IOPTO = 7
};
#define ZPCI_IOTA_IOT_ENABLED 0x800UL
#define ZPCI_IOTA_DT_ST (ZPCI_IOTA_STO << 2)
#define ZPCI_IOTA_DT_RT (ZPCI_IOTA_RTTO << 2)
#define ZPCI_IOTA_DT_RS (ZPCI_IOTA_RSTO << 2)
#define ZPCI_IOTA_DT_RF (ZPCI_IOTA_RFTO << 2)
#define ZPCI_IOTA_DT_PF (ZPCI_IOTA_PFAA << 2)
#define ZPCI_IOTA_FS_4K 0
#define ZPCI_IOTA_FS_1M 1
#define ZPCI_IOTA_FS_2G 2
#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
#define ZPCI_IOTA_RFTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RF)
#define ZPCI_IOTA_RFAA_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_PF | ZPCI_IOTA_FS_2G)
/* I/O Region and segment tables */
#define ZPCI_INDEX_MASK 0x7ffUL
#define ZPCI_TABLE_TYPE_MASK 0xc
#define ZPCI_TABLE_TYPE_RFX 0xc
#define ZPCI_TABLE_TYPE_RSX 0x8
#define ZPCI_TABLE_TYPE_RTX 0x4
#define ZPCI_TABLE_TYPE_SX 0x0
#define ZPCI_TABLE_LEN_RFX 0x3
#define ZPCI_TABLE_LEN_RSX 0x3
#define ZPCI_TABLE_LEN_RTX 0x3
#define ZPCI_TABLE_OFFSET_MASK 0xc0
#define ZPCI_TABLE_SIZE 0x4000
#define ZPCI_TABLE_ALIGN ZPCI_TABLE_SIZE
#define ZPCI_TABLE_ENTRY_SIZE (sizeof(unsigned long))
#define ZPCI_TABLE_ENTRIES (ZPCI_TABLE_SIZE / ZPCI_TABLE_ENTRY_SIZE)
#define ZPCI_TABLE_BITS 11
#define ZPCI_PT_BITS 8
#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + PAGE_SHIFT)
#define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS)
#define ZPCI_RTE_FLAG_MASK 0x3fffUL
#define ZPCI_RTE_ADDR_MASK (~ZPCI_RTE_FLAG_MASK)
#define ZPCI_STE_FLAG_MASK 0x7ffUL
#define ZPCI_STE_ADDR_MASK (~ZPCI_STE_FLAG_MASK)
/* I/O Page tables */
#define ZPCI_PTE_VALID_MASK 0x400
#define ZPCI_PTE_INVALID 0x400
#define ZPCI_PTE_VALID 0x000
#define ZPCI_PT_SIZE 0x800
#define ZPCI_PT_ALIGN ZPCI_PT_SIZE
#define ZPCI_PT_ENTRIES (ZPCI_PT_SIZE / ZPCI_TABLE_ENTRY_SIZE)
#define ZPCI_PT_MASK (ZPCI_PT_ENTRIES - 1)
#define ZPCI_PTE_FLAG_MASK 0xfffUL
#define ZPCI_PTE_ADDR_MASK (~ZPCI_PTE_FLAG_MASK)
/* Shared bits */
#define ZPCI_TABLE_VALID 0x00
#define ZPCI_TABLE_INVALID 0x20
#define ZPCI_TABLE_PROTECTED 0x200
#define ZPCI_TABLE_UNPROTECTED 0x000
#define ZPCI_TABLE_VALID_MASK 0x20
#define ZPCI_TABLE_PROT_MASK 0x200
static inline unsigned int calc_rtx(dma_addr_t ptr)
{
return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}
static inline unsigned int calc_sx(dma_addr_t ptr)
{
return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}
static inline unsigned int calc_px(dma_addr_t ptr)
{
return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
}
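A worked example of how these helpers split a (made-up) DMA address into the three table indexes, assuming PAGE_SHIFT is 12 (so ZPCI_ST_SHIFT = 20 and ZPCI_RT_SHIFT = 31):

	/* dma_addr = 0x123456789
	 *   calc_rtx() = (0x123456789 >> 31) & 0x7ff = 0x2    region third index
	 *   calc_sx()  = (0x123456789 >> 20) & 0x7ff = 0x234  segment index
	 *   calc_px()  = (0x123456789 >> 12) & 0xff  = 0x56   page index
	 */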
static inline void set_pt_pfaa(unsigned long *entry, void *pfaa)
{
*entry &= ZPCI_PTE_FLAG_MASK;
*entry |= ((unsigned long) pfaa & ZPCI_PTE_ADDR_MASK);
}
static inline void set_rt_sto(unsigned long *entry, void *sto)
{
*entry &= ZPCI_RTE_FLAG_MASK;
*entry |= ((unsigned long) sto & ZPCI_RTE_ADDR_MASK);
*entry |= ZPCI_TABLE_TYPE_RTX;
}
static inline void set_st_pto(unsigned long *entry, void *pto)
{
*entry &= ZPCI_STE_FLAG_MASK;
*entry |= ((unsigned long) pto & ZPCI_STE_ADDR_MASK);
*entry |= ZPCI_TABLE_TYPE_SX;
}
static inline void validate_rt_entry(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_VALID_MASK;
*entry &= ~ZPCI_TABLE_OFFSET_MASK;
*entry |= ZPCI_TABLE_VALID;
*entry |= ZPCI_TABLE_LEN_RTX;
}
static inline void validate_st_entry(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_VALID_MASK;
*entry |= ZPCI_TABLE_VALID;
}
static inline void invalidate_table_entry(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_VALID_MASK;
*entry |= ZPCI_TABLE_INVALID;
}
static inline void invalidate_pt_entry(unsigned long *entry)
{
WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
*entry &= ~ZPCI_PTE_VALID_MASK;
*entry |= ZPCI_PTE_INVALID;
}
static inline void validate_pt_entry(unsigned long *entry)
{
WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
*entry &= ~ZPCI_PTE_VALID_MASK;
*entry |= ZPCI_PTE_VALID;
}
static inline void entry_set_protected(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_PROT_MASK;
*entry |= ZPCI_TABLE_PROTECTED;
}
static inline void entry_clr_protected(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_PROT_MASK;
*entry |= ZPCI_TABLE_UNPROTECTED;
}
static inline int reg_entry_isvalid(unsigned long entry)
{
return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}
static inline int pt_entry_isvalid(unsigned long entry)
{
return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}
static inline int entry_isprotected(unsigned long entry)
{
return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
}
static inline unsigned long *get_rt_sto(unsigned long entry)
{
return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
? (unsigned long *) (entry & ZPCI_RTE_ADDR_MASK)
: NULL;
}
static inline unsigned long *get_st_pto(unsigned long entry)
{
return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
? (unsigned long *) (entry & ZPCI_STE_ADDR_MASK)
: NULL;
}
/* Prototypes */
int zpci_dma_init_device(struct zpci_dev *);
void zpci_dma_exit_device(struct zpci_dev *);
#endif


@ -0,0 +1,280 @@
#ifndef _ASM_S390_PCI_INSN_H
#define _ASM_S390_PCI_INSN_H
#include <linux/delay.h>
#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
/* Load/Store status codes */
#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
#define ZPCI_PCI_ST_FUNC_IN_ERR 8
#define ZPCI_PCI_ST_BLOCKED 12
#define ZPCI_PCI_ST_INSUF_RES 16
#define ZPCI_PCI_ST_INVAL_AS 20
#define ZPCI_PCI_ST_FUNC_ALREADY_ENABLED 24
#define ZPCI_PCI_ST_DMA_AS_NOT_ENABLED 28
#define ZPCI_PCI_ST_2ND_OP_IN_INV_AS 36
#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40
#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44
/* Load/Store return codes */
#define ZPCI_PCI_LS_OK 0
#define ZPCI_PCI_LS_ERR 1
#define ZPCI_PCI_LS_BUSY 2
#define ZPCI_PCI_LS_INVAL_HANDLE 3
/* Load/Store address space identifiers */
#define ZPCI_PCIAS_MEMIO_0 0
#define ZPCI_PCIAS_MEMIO_1 1
#define ZPCI_PCIAS_MEMIO_2 2
#define ZPCI_PCIAS_MEMIO_3 3
#define ZPCI_PCIAS_MEMIO_4 4
#define ZPCI_PCIAS_MEMIO_5 5
#define ZPCI_PCIAS_CFGSPC 15
/* Modify PCI Function Controls */
#define ZPCI_MOD_FC_REG_INT 2
#define ZPCI_MOD_FC_DEREG_INT 3
#define ZPCI_MOD_FC_REG_IOAT 4
#define ZPCI_MOD_FC_DEREG_IOAT 5
#define ZPCI_MOD_FC_REREG_IOAT 6
#define ZPCI_MOD_FC_RESET_ERROR 7
#define ZPCI_MOD_FC_RESET_BLOCK 9
#define ZPCI_MOD_FC_SET_MEASURE 10
/* FIB function controls */
#define ZPCI_FIB_FC_ENABLED 0x80
#define ZPCI_FIB_FC_ERROR 0x40
#define ZPCI_FIB_FC_LS_BLOCKED 0x20
#define ZPCI_FIB_FC_DMAAS_REG 0x10
/* Function Information Block */
struct zpci_fib {
u32 fmt : 8; /* format */
u32 : 24;
u32 reserved1;
u8 fc; /* function controls */
u8 reserved2;
u16 reserved3;
u32 reserved4;
u64 pba; /* PCI base address */
u64 pal; /* PCI address limit */
u64 iota; /* I/O Translation Anchor */
u32 : 1;
u32 isc : 3; /* Interrupt subclass */
u32 noi : 12; /* Number of interrupts */
u32 : 2;
u32 aibvo : 6; /* Adapter interrupt bit vector offset */
u32 sum : 1; /* Adapter int summary bit enabled */
u32 : 1;
u32 aisbo : 6; /* Adapter int summary bit offset */
u32 reserved5;
u64 aibv; /* Adapter int bit vector address */
u64 aisb; /* Adapter int summary bit address */
u64 fmb_addr; /* Function measurement block address and key */
u64 reserved6;
u64 reserved7;
} __packed;
/* Modify PCI Function Controls */
static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
{
u8 cc;
asm volatile (
" .insn rxy,0xe300000000d0,%[req],%[fib]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
: : "cc");
*status = req >> 24 & 0xff;
return cc;
}
static inline int mpcifc_instr(u64 req, struct zpci_fib *fib)
{
u8 cc, status;
do {
cc = __mpcifc(req, fib, &status);
if (cc == 2)
msleep(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc)
printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
__func__, cc, status);
return (cc) ? -EIO : 0;
}
/* Refresh PCI Translations */
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
register u64 __addr asm("2") = addr;
register u64 __range asm("3") = range;
u8 cc;
asm volatile (
" .insn rre,0xb9d30000,%[fn],%[addr]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [fn] "+d" (fn)
: [addr] "d" (__addr), "d" (__range)
: "cc");
*status = fn >> 24 & 0xff;
return cc;
}
static inline int rpcit_instr(u64 fn, u64 addr, u64 range)
{
u8 cc, status;
do {
cc = __rpcit(fn, addr, range, &status);
if (cc == 2)
udelay(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc)
printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
__func__, cc, status, addr, range);
return (cc) ? -EIO : 0;
}
/* Store PCI function controls */
static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status)
{
u64 fn = (u64) handle << 32 | space << 16;
u8 cc;
asm volatile (
" .insn rxy,0xe300000000d4,%[fn],%[fib]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib)
: : "cc");
*status = fn >> 24 & 0xff;
return cc;
}
/* Set Interruption Controls */
static inline void sic_instr(u16 ctl, char *unused, u8 isc)
{
asm volatile (
" .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
}
/* PCI Load */
static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
register u64 __req asm("2") = req;
register u64 __offset asm("3") = offset;
u64 __data;
u8 cc;
asm volatile (
" .insn rre,0xb9d20000,%[data],%[req]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req)
: "d" (__offset)
: "cc");
*status = __req >> 24 & 0xff;
*data = __data;
return cc;
}
static inline int pcilg_instr(u64 *data, u64 req, u64 offset)
{
u8 cc, status;
do {
cc = __pcilg(data, req, offset, &status);
if (cc == 2)
udelay(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc) {
printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
__func__, cc, status, req, offset);
/* TODO: on IO errors set data to 0xff...
* here or in users of pcilg (le conversion)?
*/
}
return (cc) ? -EIO : 0;
}
/* PCI Store */
static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
register u64 __req asm("2") = req;
register u64 __offset asm("3") = offset;
u8 cc;
asm volatile (
" .insn rre,0xb9d00000,%[data],%[req]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [req] "+d" (__req)
: "d" (__offset), [data] "d" (data)
: "cc");
*status = __req >> 24 & 0xff;
return cc;
}
static inline int pcistg_instr(u64 data, u64 req, u64 offset)
{
u8 cc, status;
do {
cc = __pcistg(data, req, offset, &status);
if (cc == 2)
udelay(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc)
printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
__func__, cc, status, req, offset);
return (cc) ? -EIO : 0;
}
/* PCI Store Block */
static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
{
u8 cc;
asm volatile (
" .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [req] "+d" (req)
: [offset] "d" (offset), [data] "Q" (*data)
: "cc");
*status = req >> 24 & 0xff;
return cc;
}
static inline int pcistb_instr(const u64 *data, u64 req, u64 offset)
{
u8 cc, status;
do {
cc = __pcistb(data, req, offset, &status);
if (cc == 2)
udelay(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc)
printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
__func__, cc, status, req, offset);
return (cc) ? -EIO : 0;
}
#endif


@ -0,0 +1,194 @@
#ifndef _ASM_S390_PCI_IO_H
#define _ASM_S390_PCI_IO_H
#ifdef CONFIG_PCI
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/pci_insn.h>
/* I/O Map */
#define ZPCI_IOMAP_MAX_ENTRIES 0x7fff
#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000ULL
#define ZPCI_IOMAP_ADDR_IDX_MASK 0x7fff000000000000ULL
#define ZPCI_IOMAP_ADDR_OFF_MASK 0x0000ffffffffffffULL
struct zpci_iomap_entry {
u32 fh;
u8 bar;
};
extern struct zpci_iomap_entry *zpci_iomap_start;
#define ZPCI_IDX(addr) \
(((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48)
#define ZPCI_OFFSET(addr) \
((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)
#define ZPCI_CREATE_REQ(handle, space, len) \
((u64) handle << 32 | space << 16 | len)
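The layout these masks imply: bit 63 marks an iomap cookie, bits 48-62 select the zpci_iomap_start[] slot, and bits 0-47 carry the offset into the BAR. A hedged example with made-up values:

	void __iomem *addr = (void __iomem *)
		(ZPCI_IOMAP_ADDR_BASE | (5ULL << 48) | 0x1000);
	/* ZPCI_IDX(addr)    == 5      -> iomap table slot  */
	/* ZPCI_OFFSET(addr) == 0x1000 -> offset in the BAR */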
#define zpci_read(LENGTH, RETTYPE) \
static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
{ \
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data; \
int rc; \
\
rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \
if (rc) \
data = -1ULL; \
return (RETTYPE) data; \
}
#define zpci_write(LENGTH, VALTYPE) \
static inline void zpci_write_##VALTYPE(VALTYPE val, \
const volatile void __iomem *addr) \
{ \
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data = (VALTYPE) val; \
\
pcistg_instr(data, req, ZPCI_OFFSET(addr)); \
}
zpci_read(8, u64)
zpci_read(4, u32)
zpci_read(2, u16)
zpci_read(1, u8)
zpci_write(8, u64)
zpci_write(4, u32)
zpci_write(2, u16)
zpci_write(1, u8)
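For reference, zpci_read(4, u32) above expands to roughly the following accessor (a sketch of the preprocessor output, not additional code):

	static inline u32 zpci_read_u32(const volatile void __iomem *addr)
	{
		struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
		u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, 4);
		u64 data;
		int rc;

		rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr));
		if (rc)
			data = -1ULL;	/* failed reads return all ones */
		return (u32) data;
	}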
static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
{
u64 val;
switch (len) {
case 1:
val = (u64) *((u8 *) data);
break;
case 2:
val = (u64) *((u16 *) data);
break;
case 4:
val = (u64) *((u32 *) data);
break;
case 8:
val = (u64) *((u64 *) data);
break;
default:
val = 0; /* let FW report error */
break;
}
return pcistg_instr(val, req, offset);
}
static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
{
u64 data;
u8 cc;
cc = pcilg_instr(&data, req, offset);
switch (len) {
case 1:
*((u8 *) dst) = (u8) data;
break;
case 2:
*((u16 *) dst) = (u16) data;
break;
case 4:
*((u32 *) dst) = (u32) data;
break;
case 8:
*((u64 *) dst) = (u64) data;
break;
}
return cc;
}
static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
{
return pcistb_instr(data, req, offset);
}
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
{
int count = len > max ? max : len, size = 1;
while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
dst = dst >> 1;
src = src >> 1;
size = size << 1;
}
return size;
}
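In effect this returns the largest power-of-two chunk size, capped by len and max, to which both addresses are aligned; a hedged walk-through with made-up arguments:

	/* zpci_get_max_write_size(0x1008, 0x2010, 64, 128):
	 *   size doubles 1 -> 2 -> 4 -> 8; after three right shifts src
	 *   becomes 0x201, which is odd (0x1008 is only 8-byte aligned),
	 *   so 8 is returned.
	 */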
static inline int zpci_memcpy_fromio(void *dst,
const volatile void __iomem *src,
unsigned long n)
{
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
u64 req, offset = ZPCI_OFFSET(src);
int size, rc = 0;
while (n > 0) {
size = zpci_get_max_write_size((u64) src, (u64) dst, n, 8);
req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
rc = zpci_read_single(req, dst, offset, size);
if (rc)
break;
offset += size;
dst += size;
n -= size;
}
return rc;
}
static inline int zpci_memcpy_toio(volatile void __iomem *dst,
const void *src, unsigned long n)
{
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
u64 req, offset = ZPCI_OFFSET(dst);
int size, rc = 0;
if (!src)
return -EINVAL;
while (n > 0) {
size = zpci_get_max_write_size((u64) dst, (u64) src, n, 128);
req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
if (size > 8) /* main path */
rc = zpci_write_block(req, src, offset);
else
rc = zpci_write_single(req, src, offset, size);
if (rc)
break;
offset += size;
src += size;
n -= size;
}
return rc;
}
static inline int zpci_memset_io(volatile void __iomem *dst,
unsigned char val, size_t count)
{
u8 *src = kmalloc(count, GFP_KERNEL);
int rc;
if (src == NULL)
return -ENOMEM;
memset(src, val, count);
rc = zpci_memcpy_toio(dst, src, count);
kfree(src);
return rc;
}
#endif /* CONFIG_PCI */
#endif /* _ASM_S390_PCI_IO_H */


@ -35,7 +35,6 @@
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
extern void fault_init(void);
/*
* The S390 doesn't have any external MMU info: the kernel page
@ -336,6 +335,8 @@ extern unsigned long MODULES_END;
#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
@ -435,6 +436,7 @@ static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_large(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
#else /* CONFIG_64BIT */
@ -480,6 +482,13 @@ static inline int pud_none(pud_t pud)
return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}
static inline int pud_large(pud_t pud)
{
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
return 0;
return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}
static inline int pud_bad(pud_t pud)
{
/*


@ -55,5 +55,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info);
void sclp_get_ipl_info(struct sclp_ipl_info *info);
bool sclp_has_linemode(void);
bool sclp_has_vt220(void);
int sclp_pci_configure(u32 fid);
int sclp_pci_deconfigure(u32 fid);
#endif /* _ASM_S390_SCLP_H */


@ -8,32 +8,34 @@ struct cpu;
#ifdef CONFIG_SCHED_BOOK
extern unsigned char cpu_socket_id[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_socket_id[cpu])
struct cpu_topology_s390 {
unsigned short core_id;
unsigned short socket_id;
unsigned short book_id;
cpumask_t core_mask;
cpumask_t book_mask;
};
extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
#define mc_capable() 1
static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_core_map[cpu];
return &cpu_topology[cpu].core_mask;
}
#define topology_core_id(cpu) (cpu_core_id[cpu])
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define mc_capable() (1)
extern unsigned char cpu_book_id[NR_CPUS];
extern cpumask_t cpu_book_map[NR_CPUS];
static inline const struct cpumask *cpu_book_mask(int cpu)
{
return &cpu_book_map[cpu];
return &cpu_topology[cpu].book_mask;
}
#define topology_book_id(cpu) (cpu_book_id[cpu])
#define topology_book_cpumask(cpu) (&cpu_book_map[cpu])
int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);


@ -0,0 +1,6 @@
#ifndef _ASM_S390_VGA_H
#define _ASM_S390_VGA_H
/* Avoid compile errors due to missing asm/vga.h */
#endif /* _ASM_S390_VGA_H */


@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o
sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)

File diff suppressed because it is too large.


@ -231,12 +231,12 @@ sysc_work:
jo sysc_mcck_pending
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
jo sysc_reschedule
tm __TI_flags+3(%r12),_TIF_PER_TRAP
jo sysc_singlestep
tm __TI_flags+3(%r12),_TIF_SIGPENDING
jo sysc_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
jo sysc_notify_resume
tm __TI_flags+3(%r12),_TIF_PER_TRAP
jo sysc_singlestep
j sysc_return # beware of critical section cleanup
#
@ -259,7 +259,6 @@ sysc_mcck_pending:
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
lr %r2,%r11 # pass pointer to pt_regs
l %r1,BASED(.Ldo_signal)
basr %r14,%r1 # call do_signal
@ -286,7 +285,7 @@ sysc_notify_resume:
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
ni __TI_flags+3(%r12),255-_TIF_PER_TRAP
lr %r2,%r11 # pass pointer to pt_regs
l %r1,BASED(.Ldo_per_trap)
la %r14,BASED(sysc_return)


@ -6,7 +6,6 @@
#include <asm/ptrace.h>
#include <asm/cputime.h>
extern void (*pgm_check_table[128])(struct pt_regs *);
extern void *restart_stack;
void system_call(void);
@ -25,6 +24,26 @@ void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
void do_asce_exception(struct pt_regs *regs);
void addressing_exception(struct pt_regs *regs);
void data_exception(struct pt_regs *regs);
void default_trap_handler(struct pt_regs *regs);
void divide_exception(struct pt_regs *regs);
void execute_exception(struct pt_regs *regs);
void hfp_divide_exception(struct pt_regs *regs);
void hfp_overflow_exception(struct pt_regs *regs);
void hfp_significance_exception(struct pt_regs *regs);
void hfp_sqrt_exception(struct pt_regs *regs);
void hfp_underflow_exception(struct pt_regs *regs);
void illegal_op(struct pt_regs *regs);
void operand_exception(struct pt_regs *regs);
void overflow_exception(struct pt_regs *regs);
void privileged_op(struct pt_regs *regs);
void space_switch_exception(struct pt_regs *regs);
void special_op_exception(struct pt_regs *regs);
void specification_exception(struct pt_regs *regs);
void transaction_exception(struct pt_regs *regs);
void translation_exception(struct pt_regs *regs);
void do_per_trap(struct pt_regs *regs);
void syscall_trace(struct pt_regs *regs, int entryexit);
void kernel_stack_overflow(struct pt_regs * regs);


@ -80,14 +80,21 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
#endif
.endm
.macro HANDLE_SIE_INTERCEPT scratch
.macro HANDLE_SIE_INTERCEPT scratch,pgmcheck
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
tmhh %r8,0x0001 # interrupting from user ?
jnz .+42
lgr \scratch,%r9
slg \scratch,BASED(.Lsie_loop)
clg \scratch,BASED(.Lsie_length)
.if \pgmcheck
# Some program interrupts are suppressing (e.g. protection).
# We must also check the instruction after SIE in that case.
# do_protection_exception will rewind to rewind_pad
jh .+22
.else
jhe .+22
.endif
lg %r9,BASED(.Lsie_loop)
SPP BASED(.Lhost_id) # set host id
#endif
@ -262,12 +269,12 @@ sysc_work:
jo sysc_mcck_pending
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo sysc_reschedule
tm __TI_flags+7(%r12),_TIF_PER_TRAP
jo sysc_singlestep
tm __TI_flags+7(%r12),_TIF_SIGPENDING
jo sysc_sigpending
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo sysc_notify_resume
tm __TI_flags+7(%r12),_TIF_PER_TRAP
jo sysc_singlestep
j sysc_return # beware of critical section cleanup
#
@ -288,7 +295,6 @@ sysc_mcck_pending:
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_signal
tm __TI_flags+7(%r12),_TIF_SYSCALL
@ -313,7 +319,7 @@ sysc_notify_resume:
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
ni __TI_flags+7(%r12),255-_TIF_PER_TRAP
lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,sysc_return
jg do_per_trap
@ -375,7 +381,7 @@ ENTRY(pgm_check_handler)
lg %r12,__LC_THREAD_INFO
larl %r13,system_call
lmg %r8,%r9,__LC_PGM_OLD_PSW
HANDLE_SIE_INTERCEPT %r14
HANDLE_SIE_INTERCEPT %r14,1
tmhh %r8,0x0001 # test problem state bit
jnz 1f # -> fault in user space
tmhh %r8,0x4000 # PER bit set in old PSW ?
@ -413,9 +419,9 @@ ENTRY(pgm_check_handler)
larl %r1,pgm_check_table
llgh %r10,__PT_INT_CODE+2(%r11)
nill %r10,0x007f
sll %r10,3
sll %r10,2
je sysc_return
lg %r1,0(%r10,%r1) # load address of handler routine
lgf %r1,0(%r10,%r1) # load address of handler routine
lgr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # branch to interrupt-handler
j sysc_return
@ -451,7 +457,7 @@ ENTRY(io_int_handler)
lg %r12,__LC_THREAD_INFO
larl %r13,system_call
lmg %r8,%r9,__LC_IO_OLD_PSW
HANDLE_SIE_INTERCEPT %r14
HANDLE_SIE_INTERCEPT %r14,0
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
tmhh %r8,0x0001 # interrupting from user?
jz io_skip
@ -597,7 +603,7 @@ ENTRY(ext_int_handler)
lg %r12,__LC_THREAD_INFO
larl %r13,system_call
lmg %r8,%r9,__LC_EXT_OLD_PSW
HANDLE_SIE_INTERCEPT %r14
HANDLE_SIE_INTERCEPT %r14,0
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
tmhh %r8,0x0001 # interrupting from user ?
jz ext_skip
@ -645,7 +651,7 @@ ENTRY(mcck_int_handler)
lg %r12,__LC_THREAD_INFO
larl %r13,system_call
lmg %r8,%r9,__LC_MCK_OLD_PSW
HANDLE_SIE_INTERCEPT %r14
HANDLE_SIE_INTERCEPT %r14,0
tm __LC_MCCK_CODE,0x80 # system damage?
jo mcck_panic # yes -> rest of mcck code invalid
lghi %r14,__LC_CPU_TIMER_SAVE_AREA
@ -944,6 +950,13 @@ ENTRY(sie64a)
stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
lmg %r0,%r13,0(%r3) # load guest gprs 0-13
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
# instructions in the sie_loop should not cause program interrupts. So
# let's use a nop (47 00 00 00) as a landing pad.
# See also HANDLE_SIE_INTERCEPT
rewind_pad:
nop 0
sie_loop:
lg %r14,__LC_THREAD_INFO # pointer thread_info struct
tm __TI_flags+7(%r14),_TIF_EXIT_SIE
@ -983,6 +996,7 @@ sie_fault:
.Lhost_id:
.quad 0
EX_TABLE(rewind_pad,sie_fault)
EX_TABLE(sie_loop,sie_fault)
#endif


@ -393,30 +393,35 @@ ENTRY(startup_kdump)
xc 0x300(256),0x300
xc 0xe00(256),0xe00
stck __LC_LAST_UPDATE_CLOCK
spt 5f-.LPG0(%r13)
mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13)
spt 6f-.LPG0(%r13)
mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
#ifndef CONFIG_MARCH_G5
# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
.insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list
tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
jz 0f
la %r0,0
la %r0,1
.insn s,0xb2b00000,__LC_STFL_FAC_LIST # store facility list extended
0: l %r0,__LC_STFL_FAC_LIST
n %r0,2f+8-.LPG0(%r13)
cl %r0,2f+8-.LPG0(%r13)
jne 1f
l %r0,__LC_STFL_FAC_LIST+4
n %r0,2f+12-.LPG0(%r13)
cl %r0,2f+12-.LPG0(%r13)
je 3f
1: l %r15,.Lstack-.LPG0(%r13)
# verify if all required facilities are supported by the machine
0: la %r1,__LC_STFL_FAC_LIST
la %r2,3f+8-.LPG0(%r13)
l %r3,0(%r2)
1: l %r0,0(%r1)
n %r0,4(%r2)
cl %r0,4(%r2)
jne 2f
la %r1,4(%r1)
la %r2,4(%r2)
ahi %r3,-1
jnz 1b
j 4f
2: l %r15,.Lstack-.LPG0(%r13)
ahi %r15,-96
la %r2,.Lals_string-.LPG0(%r13)
l %r3,.Lsclp_print-.LPG0(%r13)
basr %r14,%r3
lpsw 2f-.LPG0(%r13) # machine type not good enough, crash
lpsw 3f-.LPG0(%r13) # machine type not good enough, crash
.Lals_string:
.asciz "The Linux kernel requires more recent processor hardware"
.Lsclp_print:
@ -424,33 +429,42 @@ ENTRY(startup_kdump)
.Lstack:
.long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
.align 16
2: .long 0x000a0000,0x8badcccc
3: .long 0x000a0000,0x8badcccc
# List of facilities that are required. If not all facilities are present
# the kernel will crash. Format is number of facility words with bits set,
# followed by the facility words.
#if defined(CONFIG_64BIT)
#if defined(CONFIG_MARCH_Z196)
.long 0xc100efe3, 0xf46c0000
#if defined(CONFIG_MARCH_ZEC12)
.long 3, 0xc100efe3, 0xf46ce000, 0x00400000
#elif defined(CONFIG_MARCH_Z196)
.long 2, 0xc100efe3, 0xf46c0000
#elif defined(CONFIG_MARCH_Z10)
.long 0xc100efe3, 0xf0680000
.long 2, 0xc100efe3, 0xf0680000
#elif defined(CONFIG_MARCH_Z9_109)
.long 0xc100efc3, 0x00000000
.long 1, 0xc100efc3
#elif defined(CONFIG_MARCH_Z990)
.long 0xc0002000, 0x00000000
.long 1, 0xc0002000
#elif defined(CONFIG_MARCH_Z900)
.long 0xc0000000, 0x00000000
.long 1, 0xc0000000
#endif
#else
#if defined(CONFIG_MARCH_Z196)
.long 0x8100c880, 0x00000000
#if defined(CONFIG_MARCH_ZEC12)
.long 1, 0x8100c880
#elif defined(CONFIG_MARCH_Z196)
.long 1, 0x8100c880
#elif defined(CONFIG_MARCH_Z10)
.long 0x8100c880, 0x00000000
.long 1, 0x8100c880
#elif defined(CONFIG_MARCH_Z9_109)
.long 0x8100c880, 0x00000000
.long 1, 0x8100c880
#elif defined(CONFIG_MARCH_Z990)
.long 0x80002000, 0x00000000
.long 1, 0x80002000
#elif defined(CONFIG_MARCH_Z900)
.long 0x80000000, 0x00000000
.long 1, 0x80000000
#endif
#endif
3:
4:
#endif
#ifdef CONFIG_64BIT
@ -459,14 +473,14 @@ ENTRY(startup_kdump)
jg startup_continue
#else
/* Continue with 31bit startup code in head31.S */
l %r13,4f-.LPG0(%r13)
l %r13,5f-.LPG0(%r13)
b 0(%r13)
.align 8
4: .long startup_continue
5: .long startup_continue
#endif
.align 8
5: .long 0x7fffffff,0xffffffff
6: .long 0x7fffffff,0xffffffff
#include "head_kdump.S"

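The required-facilities format described above (a count of facility words followed by the words themselves) makes the check a simple loop: AND each stored word with the required word and compare, which is what the n/cl pair does per iteration. A user-space sketch, assuming illustrative stored values; the required list is copied from the MARCH_Z196 entry:

#include <stdio.h>
#include <stdint.h>

static int facilities_sufficient(const uint32_t *stored,
                                 const uint32_t *required)
{
        uint32_t nwords = required[0];    /* count comes first */
        uint32_t i;

        for (i = 0; i < nwords; i++) {
                /* same test as "n %r0,4(%r2); cl %r0,4(%r2)" */
                if ((stored[i] & required[1 + i]) != required[1 + i])
                        return 0;         /* machine too old */
        }
        return 1;
}

int main(void)
{
        const uint32_t z196_required[] = { 2, 0xc100efe3, 0xf46c0000 };
        const uint32_t stored[] = { 0xc100efe3, 0xf46c0000 };  /* assumed */

        printf("ok=%d\n", facilities_sufficient(stored, z196_required));
        return 0;
}
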
arch/s390/kernel/irq.c

@ -58,6 +58,8 @@ static const struct irq_class intrclass_names[] = {
[IOINT_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
[IOINT_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
[IOINT_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
[IOINT_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
[IOINT_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
[NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
};

arch/s390/kernel/pgm_check.S (new file, 152 lines)

@ -0,0 +1,152 @@
/*
* Program check table.
*
* Copyright IBM Corp. 2012
*/
#include <linux/linkage.h>
#ifdef CONFIG_32BIT
#define PGM_CHECK_64BIT(handler) .long default_trap_handler
#else
#define PGM_CHECK_64BIT(handler) .long handler
#endif
#define PGM_CHECK(handler) .long handler
#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
/*
* The program check table contains exactly 128 (0x00-0x7f) entries. Each
* line defines the 31 and/or 64 bit function to be called corresponding
* to the program check interruption code.
*/
.section .rodata, "a"
ENTRY(pgm_check_table)
PGM_CHECK_DEFAULT /* 00 */
PGM_CHECK(illegal_op) /* 01 */
PGM_CHECK(privileged_op) /* 02 */
PGM_CHECK(execute_exception) /* 03 */
PGM_CHECK(do_protection_exception) /* 04 */
PGM_CHECK(addressing_exception) /* 05 */
PGM_CHECK(specification_exception) /* 06 */
PGM_CHECK(data_exception) /* 07 */
PGM_CHECK(overflow_exception) /* 08 */
PGM_CHECK(divide_exception) /* 09 */
PGM_CHECK(overflow_exception) /* 0a */
PGM_CHECK(divide_exception) /* 0b */
PGM_CHECK(hfp_overflow_exception) /* 0c */
PGM_CHECK(hfp_underflow_exception) /* 0d */
PGM_CHECK(hfp_significance_exception) /* 0e */
PGM_CHECK(hfp_divide_exception) /* 0f */
PGM_CHECK(do_dat_exception) /* 10 */
PGM_CHECK(do_dat_exception) /* 11 */
PGM_CHECK(translation_exception) /* 12 */
PGM_CHECK(special_op_exception) /* 13 */
PGM_CHECK_DEFAULT /* 14 */
PGM_CHECK(operand_exception) /* 15 */
PGM_CHECK_DEFAULT /* 16 */
PGM_CHECK_DEFAULT /* 17 */
PGM_CHECK_64BIT(transaction_exception) /* 18 */
PGM_CHECK_DEFAULT /* 19 */
PGM_CHECK_DEFAULT /* 1a */
PGM_CHECK_DEFAULT /* 1b */
PGM_CHECK(space_switch_exception) /* 1c */
PGM_CHECK(hfp_sqrt_exception) /* 1d */
PGM_CHECK_DEFAULT /* 1e */
PGM_CHECK_DEFAULT /* 1f */
PGM_CHECK_DEFAULT /* 20 */
PGM_CHECK_DEFAULT /* 21 */
PGM_CHECK_DEFAULT /* 22 */
PGM_CHECK_DEFAULT /* 23 */
PGM_CHECK_DEFAULT /* 24 */
PGM_CHECK_DEFAULT /* 25 */
PGM_CHECK_DEFAULT /* 26 */
PGM_CHECK_DEFAULT /* 27 */
PGM_CHECK_DEFAULT /* 28 */
PGM_CHECK_DEFAULT /* 29 */
PGM_CHECK_DEFAULT /* 2a */
PGM_CHECK_DEFAULT /* 2b */
PGM_CHECK_DEFAULT /* 2c */
PGM_CHECK_DEFAULT /* 2d */
PGM_CHECK_DEFAULT /* 2e */
PGM_CHECK_DEFAULT /* 2f */
PGM_CHECK_DEFAULT /* 30 */
PGM_CHECK_DEFAULT /* 31 */
PGM_CHECK_DEFAULT /* 32 */
PGM_CHECK_DEFAULT /* 33 */
PGM_CHECK_DEFAULT /* 34 */
PGM_CHECK_DEFAULT /* 35 */
PGM_CHECK_DEFAULT /* 36 */
PGM_CHECK_DEFAULT /* 37 */
PGM_CHECK_64BIT(do_asce_exception) /* 38 */
PGM_CHECK_64BIT(do_dat_exception) /* 39 */
PGM_CHECK_64BIT(do_dat_exception) /* 3a */
PGM_CHECK_64BIT(do_dat_exception) /* 3b */
PGM_CHECK_DEFAULT /* 3c */
PGM_CHECK_DEFAULT /* 3d */
PGM_CHECK_DEFAULT /* 3e */
PGM_CHECK_DEFAULT /* 3f */
PGM_CHECK_DEFAULT /* 40 */
PGM_CHECK_DEFAULT /* 41 */
PGM_CHECK_DEFAULT /* 42 */
PGM_CHECK_DEFAULT /* 43 */
PGM_CHECK_DEFAULT /* 44 */
PGM_CHECK_DEFAULT /* 45 */
PGM_CHECK_DEFAULT /* 46 */
PGM_CHECK_DEFAULT /* 47 */
PGM_CHECK_DEFAULT /* 48 */
PGM_CHECK_DEFAULT /* 49 */
PGM_CHECK_DEFAULT /* 4a */
PGM_CHECK_DEFAULT /* 4b */
PGM_CHECK_DEFAULT /* 4c */
PGM_CHECK_DEFAULT /* 4d */
PGM_CHECK_DEFAULT /* 4e */
PGM_CHECK_DEFAULT /* 4f */
PGM_CHECK_DEFAULT /* 50 */
PGM_CHECK_DEFAULT /* 51 */
PGM_CHECK_DEFAULT /* 52 */
PGM_CHECK_DEFAULT /* 53 */
PGM_CHECK_DEFAULT /* 54 */
PGM_CHECK_DEFAULT /* 55 */
PGM_CHECK_DEFAULT /* 56 */
PGM_CHECK_DEFAULT /* 57 */
PGM_CHECK_DEFAULT /* 58 */
PGM_CHECK_DEFAULT /* 59 */
PGM_CHECK_DEFAULT /* 5a */
PGM_CHECK_DEFAULT /* 5b */
PGM_CHECK_DEFAULT /* 5c */
PGM_CHECK_DEFAULT /* 5d */
PGM_CHECK_DEFAULT /* 5e */
PGM_CHECK_DEFAULT /* 5f */
PGM_CHECK_DEFAULT /* 60 */
PGM_CHECK_DEFAULT /* 61 */
PGM_CHECK_DEFAULT /* 62 */
PGM_CHECK_DEFAULT /* 63 */
PGM_CHECK_DEFAULT /* 64 */
PGM_CHECK_DEFAULT /* 65 */
PGM_CHECK_DEFAULT /* 66 */
PGM_CHECK_DEFAULT /* 67 */
PGM_CHECK_DEFAULT /* 68 */
PGM_CHECK_DEFAULT /* 69 */
PGM_CHECK_DEFAULT /* 6a */
PGM_CHECK_DEFAULT /* 6b */
PGM_CHECK_DEFAULT /* 6c */
PGM_CHECK_DEFAULT /* 6d */
PGM_CHECK_DEFAULT /* 6e */
PGM_CHECK_DEFAULT /* 6f */
PGM_CHECK_DEFAULT /* 70 */
PGM_CHECK_DEFAULT /* 71 */
PGM_CHECK_DEFAULT /* 72 */
PGM_CHECK_DEFAULT /* 73 */
PGM_CHECK_DEFAULT /* 74 */
PGM_CHECK_DEFAULT /* 75 */
PGM_CHECK_DEFAULT /* 76 */
PGM_CHECK_DEFAULT /* 77 */
PGM_CHECK_DEFAULT /* 78 */
PGM_CHECK_DEFAULT /* 79 */
PGM_CHECK_DEFAULT /* 7a */
PGM_CHECK_DEFAULT /* 7b */
PGM_CHECK_DEFAULT /* 7c */
PGM_CHECK_DEFAULT /* 7d */
PGM_CHECK_DEFAULT /* 7e */
PGM_CHECK_DEFAULT /* 7f */

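The table above is indexed by the low seven bits of the program interruption code; entry64.S masks with nill %r10,0x007f, scales by the 4-byte entry size, and does an indexed load. A C model of the dispatch semantics (GNU C range initializer; only two slots populated for the demo):

#include <stdio.h>

struct pt_regs;                           /* opaque in this sketch */
typedef void (*pgm_handler)(struct pt_regs *);

static void default_trap_handler(struct pt_regs *regs)
{
        (void)regs;
        puts("default handler");
}

static void illegal_op(struct pt_regs *regs)
{
        (void)regs;
        puts("illegal op");
}

static pgm_handler pgm_check_table[128] = {
        [0 ... 0x7f] = default_trap_handler,
        [0x01] = illegal_op,
};

static void do_pgm_check(unsigned int int_code, struct pt_regs *regs)
{
        /* same masking as "nill %r10,0x007f" in the entry code */
        pgm_check_table[int_code & 0x7f](regs);
}

int main(void)
{
        do_pgm_check(0x01, NULL);         /* illegal op */
        do_pgm_check(0x16, NULL);         /* unassigned -> default */
        return 0;
}
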
arch/s390/kernel/setup.c

@ -777,40 +777,6 @@ static void __init reserve_crashkernel(void)
#endif
}
static void __init init_storage_keys(unsigned long start, unsigned long end)
{
unsigned long boundary, function, size;
while (start < end) {
if (MACHINE_HAS_EDAT2) {
/* set storage keys for a 2GB frame */
function = 0x22000 | PAGE_DEFAULT_KEY;
size = 1UL << 31;
boundary = (start + size) & ~(size - 1);
if (boundary <= end) {
do {
start = pfmf(function, start);
} while (start < boundary);
continue;
}
}
if (MACHINE_HAS_EDAT1) {
/* set storage keys for a 1MB frame */
function = 0x21000 | PAGE_DEFAULT_KEY;
size = 1UL << 20;
boundary = (start + size) & ~(size - 1);
if (boundary <= end) {
do {
start = pfmf(function, start);
} while (start < boundary);
continue;
}
}
page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
start += PAGE_SIZE;
}
}
static void __init setup_memory(void)
{
unsigned long bootmap_size;
@ -889,7 +855,7 @@ static void __init setup_memory(void)
memblock_add_node(PFN_PHYS(start_chunk),
PFN_PHYS(end_chunk - start_chunk), 0);
pfn = max(start_chunk, start_pfn);
init_storage_keys(PFN_PHYS(pfn), PFN_PHYS(end_chunk));
storage_key_init_range(PFN_PHYS(pfn), PFN_PHYS(end_chunk));
}
psw_set_key(PAGE_DEFAULT_KEY);
@ -1040,6 +1006,9 @@ static void __init setup_hwcaps(void)
case 0x2818:
strcpy(elf_platform, "z196");
break;
case 0x2827:
strcpy(elf_platform, "zEC12");
break;
}
}

arch/s390/kernel/signal.c

@ -461,6 +461,8 @@ void do_signal(struct pt_regs *regs)
/* Restart system call with magic TIF bit. */
regs->gprs[2] = regs->orig_gpr2;
set_thread_flag(TIF_SYSCALL);
if (test_thread_flag(TIF_SINGLE_STEP))
set_thread_flag(TIF_PER_TRAP);
break;
}
}

arch/s390/kernel/topology.c

@ -29,48 +29,38 @@ struct mask_info {
cpumask_t mask;
};
static int topology_enabled = 1;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static void set_topology_timer(void);
static int topology_enabled = 1;
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
/* topology_lock protects the socket and book linked lists */
static DEFINE_SPINLOCK(topology_lock);
static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
unsigned char cpu_socket_id[NR_CPUS];
static struct mask_info socket_info;
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];
struct cpu_topology_s390 cpu_topology[NR_CPUS];
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
cpumask_t mask;
cpumask_clear(&mask);
if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
cpumask_copy(&mask, cpumask_of(cpu));
cpumask_copy(&mask, cpumask_of(cpu));
if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
return mask;
for (; info; info = info->next) {
if (cpumask_test_cpu(cpu, &info->mask))
return info->mask;
}
while (info) {
if (cpumask_test_cpu(cpu, &info->mask)) {
mask = info->mask;
break;
}
info = info->next;
}
if (cpumask_empty(&mask))
cpumask_copy(&mask, cpumask_of(cpu));
return mask;
}
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
struct mask_info *book,
struct mask_info *core,
int one_core_per_cpu)
struct mask_info *socket,
int one_socket_per_cpu)
{
unsigned int cpu;
@ -80,28 +70,28 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
lcpu = smp_find_processor_id(rcpu);
if (lcpu >= 0) {
cpumask_set_cpu(lcpu, &book->mask);
cpu_book_id[lcpu] = book->id;
cpumask_set_cpu(lcpu, &core->mask);
cpu_core_id[lcpu] = rcpu;
if (one_core_per_cpu) {
cpu_socket_id[lcpu] = rcpu;
core = core->next;
} else {
cpu_socket_id[lcpu] = core->id;
}
smp_cpu_set_polarization(lcpu, tl_cpu->pp);
if (lcpu < 0)
continue;
cpumask_set_cpu(lcpu, &book->mask);
cpu_topology[lcpu].book_id = book->id;
cpumask_set_cpu(lcpu, &socket->mask);
cpu_topology[lcpu].core_id = rcpu;
if (one_socket_per_cpu) {
cpu_topology[lcpu].socket_id = rcpu;
socket = socket->next;
} else {
cpu_topology[lcpu].socket_id = socket->id;
}
smp_cpu_set_polarization(lcpu, tl_cpu->pp);
}
return core;
return socket;
}
static void clear_masks(void)
{
struct mask_info *info;
info = &core_info;
info = &socket_info;
while (info) {
cpumask_clear(&info->mask);
info = info->next;
@ -120,9 +110,9 @@ static union topology_entry *next_tle(union topology_entry *tle)
return (union topology_entry *)((struct topology_container *)tle + 1);
}
static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
{
struct mask_info *core = &core_info;
struct mask_info *socket = &socket_info;
struct mask_info *book = &book_info;
union topology_entry *tle, *end;
@ -135,11 +125,11 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
book->id = tle->container.id;
break;
case 1:
core = core->next;
core->id = tle->container.id;
socket = socket->next;
socket->id = tle->container.id;
break;
case 0:
add_cpus_to_mask(&tle->cpu, book, core, 0);
add_cpus_to_mask(&tle->cpu, book, socket, 0);
break;
default:
clear_masks();
@ -149,9 +139,9 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
}
}
static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
{
struct mask_info *core = &core_info;
struct mask_info *socket = &socket_info;
struct mask_info *book = &book_info;
union topology_entry *tle, *end;
@ -164,7 +154,7 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
book->id = tle->container.id;
break;
case 0:
core = add_cpus_to_mask(&tle->cpu, book, core, 1);
socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
break;
default:
clear_masks();
@ -174,20 +164,20 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
}
}
static void tl_to_cores(struct sysinfo_15_1_x *info)
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
struct cpuid cpu_id;
get_cpu_id(&cpu_id);
spin_lock_irq(&topology_lock);
get_cpu_id(&cpu_id);
clear_masks();
switch (cpu_id.machine) {
case 0x2097:
case 0x2098:
__tl_to_cores_z10(info);
__tl_to_masks_z10(info);
break;
default:
__tl_to_cores_generic(info);
__tl_to_masks_generic(info);
}
spin_unlock_irq(&topology_lock);
}
@ -232,15 +222,20 @@ int topology_set_cpu_management(int fc)
return rc;
}
static void update_cpu_core_map(void)
static void update_cpu_masks(void)
{
unsigned long flags;
int cpu;
spin_lock_irqsave(&topology_lock, flags);
for_each_possible_cpu(cpu) {
cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
if (!MACHINE_HAS_TOPOLOGY) {
cpu_topology[cpu].core_id = cpu;
cpu_topology[cpu].socket_id = cpu;
cpu_topology[cpu].book_id = cpu;
}
}
spin_unlock_irqrestore(&topology_lock, flags);
}
@ -260,13 +255,13 @@ int arch_update_cpu_topology(void)
int cpu;
if (!MACHINE_HAS_TOPOLOGY) {
update_cpu_core_map();
update_cpu_masks();
topology_update_polarization_simple();
return 0;
}
store_topology(info);
tl_to_cores(info);
update_cpu_core_map();
tl_to_masks(info);
update_cpu_masks();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@ -355,7 +350,7 @@ void __init s390_init_cpu_topology(void)
for (i = 0; i < TOPOLOGY_NR_MAG; i++)
printk(KERN_CONT " %d", info->mag[i]);
printk(KERN_CONT " / %d\n", info->mnest);
alloc_masks(info, &core_info, 1);
alloc_masks(info, &socket_info, 1);
alloc_masks(info, &book_info, 2);
}
@ -454,7 +449,7 @@ static int __init topology_init(void)
}
set_topology_timer();
out:
update_cpu_core_map();
update_cpu_masks();
return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);

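The reworked cpu_group_map() above reduces to: walk the chained mask_info list, return the first mask that contains the cpu, and otherwise fall back to a singleton mask of the cpu itself. A user-space sketch using a plain 64-bit word in place of cpumask_t (the two-socket layout is invented):

#include <stdio.h>
#include <stdint.h>

struct mask_info {
        struct mask_info *next;
        uint64_t mask;                    /* stands in for cpumask_t */
};

static uint64_t cpu_group_map(struct mask_info *info, unsigned int cpu,
                              int topology_enabled)
{
        uint64_t self = 1ULL << cpu;

        if (!topology_enabled)
                return self;
        for (; info; info = info->next)
                if (info->mask & self)
                        return info->mask;
        return self;                      /* unknown cpu: map to itself */
}

int main(void)
{
        struct mask_info socket1 = { NULL, 0x0c };      /* cpus 2,3 */
        struct mask_info socket0 = { &socket1, 0x03 };  /* cpus 0,1 */

        printf("cpu 2 -> mask %#llx\n",
               (unsigned long long)cpu_group_map(&socket0, 2, 1));
        return 0;
}
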
arch/s390/kernel/traps.c

@ -41,8 +41,6 @@
#include <asm/ipl.h>
#include "entry.h"
void (*pgm_check_table[128])(struct pt_regs *regs);
int show_unhandled_signals = 1;
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@ -350,7 +348,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
force_sig_info(SIGTRAP, &info, current);
}
static void default_trap_handler(struct pt_regs *regs)
void default_trap_handler(struct pt_regs *regs)
{
if (user_mode(regs)) {
report_user_fault(regs, SIGSEGV);
@ -360,9 +358,9 @@ static void default_trap_handler(struct pt_regs *regs)
}
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs) \
{ \
do_trap(regs, signr, sicode, str); \
void name(struct pt_regs *regs) \
{ \
do_trap(regs, signr, sicode, str); \
}
DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
@ -417,7 +415,7 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc)
do_trap(regs, SIGFPE, si_code, "floating point exception");
}
static void __kprobes illegal_op(struct pt_regs *regs)
void __kprobes illegal_op(struct pt_regs *regs)
{
siginfo_t info;
__u8 opcode[6];
@ -536,7 +534,7 @@ DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
#endif
static void data_exception(struct pt_regs *regs)
void data_exception(struct pt_regs *regs)
{
__u16 __user *location;
int signal = 0;
@ -611,7 +609,7 @@ static void data_exception(struct pt_regs *regs)
do_trap(regs, signal, ILL_ILLOPN, "data exception");
}
static void space_switch_exception(struct pt_regs *regs)
void space_switch_exception(struct pt_regs *regs)
{
/* Set user psw back to home space mode. */
if (user_mode(regs))
@ -629,43 +627,7 @@ void __kprobes kernel_stack_overflow(struct pt_regs * regs)
panic("Corrupt kernel stack, can't continue.");
}
/* init is done in lowcore.S and head.S */
void __init trap_init(void)
{
int i;
for (i = 0; i < 128; i++)
pgm_check_table[i] = &default_trap_handler;
pgm_check_table[1] = &illegal_op;
pgm_check_table[2] = &privileged_op;
pgm_check_table[3] = &execute_exception;
pgm_check_table[4] = &do_protection_exception;
pgm_check_table[5] = &addressing_exception;
pgm_check_table[6] = &specification_exception;
pgm_check_table[7] = &data_exception;
pgm_check_table[8] = &overflow_exception;
pgm_check_table[9] = &divide_exception;
pgm_check_table[0x0A] = &overflow_exception;
pgm_check_table[0x0B] = &divide_exception;
pgm_check_table[0x0C] = &hfp_overflow_exception;
pgm_check_table[0x0D] = &hfp_underflow_exception;
pgm_check_table[0x0E] = &hfp_significance_exception;
pgm_check_table[0x0F] = &hfp_divide_exception;
pgm_check_table[0x10] = &do_dat_exception;
pgm_check_table[0x11] = &do_dat_exception;
pgm_check_table[0x12] = &translation_exception;
pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
pgm_check_table[0x18] = &transaction_exception;
pgm_check_table[0x38] = &do_asce_exception;
pgm_check_table[0x39] = &do_dat_exception;
pgm_check_table[0x3A] = &do_dat_exception;
pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
pgm_check_table[0x15] = &operand_exception;
pgm_check_table[0x1C] = &space_switch_exception;
pgm_check_table[0x1D] = &hfp_sqrt_exception;
/* Enable machine checks early. */
local_mcck_enable();
}

arch/s390/mm/Makefile

@ -2,9 +2,9 @@
# Makefile for the linux s390-specific parts of the memory manager.
#
obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
page-states.o gup.o extable.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_DEBUG_SET_MODULE_RONX) += pageattr.o
obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o
obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
obj-y += page-states.o gup.o extable.o pageattr.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o

arch/s390/mm/dump_pagetables.c

@ -150,6 +150,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pgd_t *pgd, unsigned long addr)
{
unsigned int prot;
pud_t *pud;
int i;
@ -157,7 +158,11 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
st->current_address = addr;
pud = pud_offset(pgd, addr);
if (!pud_none(*pud))
walk_pmd_level(m, st, pud, addr);
if (pud_large(*pud)) {
prot = pud_val(*pud) & _PAGE_RO;
note_page(m, st, prot, 2);
} else
walk_pmd_level(m, st, pud, addr);
else
note_page(m, st, _PAGE_INVALID, 2);
addr += PUD_SIZE;

arch/s390/mm/fault.c

@ -49,15 +49,19 @@
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
#define VM_FAULT_BADACCESS 0x040000
#define VM_FAULT_SIGNAL 0x080000
#define VM_FAULT_SIGNAL 0x080000
static unsigned long store_indication;
static unsigned long store_indication __read_mostly;
void fault_init(void)
#ifdef CONFIG_64BIT
static int __init fault_init(void)
{
if (test_facility(2) && test_facility(75))
if (test_facility(75))
store_indication = 0xc00;
return 0;
}
early_initcall(fault_init);
#endif
static inline int notify_page_fault(struct pt_regs *regs)
{
@ -273,10 +277,16 @@ static inline int do_exception(struct pt_regs *regs, int access)
unsigned int flags;
int fault;
tsk = current;
/*
* The instruction that caused the program check has
* been nullified. Don't signal single step via SIGTRAP.
*/
clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
if (notify_page_fault(regs))
return 0;
tsk = current;
mm = tsk->mm;
trans_exc_code = regs->int_parm_long;
@ -372,11 +382,6 @@ retry:
goto retry;
}
}
/*
* The instruction that caused the program check will
* be repeated. Don't signal single step via SIGTRAP.
*/
clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
fault = 0;
out_up:
up_read(&mm->mmap_sem);
@ -423,6 +428,12 @@ void __kprobes do_asce_exception(struct pt_regs *regs)
struct vm_area_struct *vma;
unsigned long trans_exc_code;
/*
* The instruction that caused the program check has
* been nullified. Don't signal single step via SIGTRAP.
*/
clear_tsk_thread_flag(current, TIF_PER_TRAP);
trans_exc_code = regs->int_parm_long;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto no_context;

arch/s390/mm/init.c

@ -125,7 +125,6 @@ void __init paging_init(void)
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
fault_init();
}
void __init mem_init(void)
@ -159,34 +158,6 @@ void __init mem_init(void)
PFN_ALIGN((unsigned long)&_eshared) - 1);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long address;
int i;
for (i = 0; i < numpages; i++) {
address = page_to_phys(page + i);
pgd = pgd_offset_k(address);
pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
if (!enable) {
__ptep_ipte(address, pte);
pte_val(*pte) = _PAGE_TYPE_EMPTY;
continue;
}
*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
/* Flush cpu write queue. */
mb();
}
}
#endif
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
unsigned long addr = begin;

arch/s390/mm/pageattr.c

@ -2,11 +2,46 @@
* Copyright IBM Corp. 2011
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/page.h>
void storage_key_init_range(unsigned long start, unsigned long end)
{
unsigned long boundary, function, size;
while (start < end) {
if (MACHINE_HAS_EDAT2) {
/* set storage keys for a 2GB frame */
function = 0x22000 | PAGE_DEFAULT_KEY;
size = 1UL << 31;
boundary = (start + size) & ~(size - 1);
if (boundary <= end) {
do {
start = pfmf(function, start);
} while (start < boundary);
continue;
}
}
if (MACHINE_HAS_EDAT1) {
/* set storage keys for a 1MB frame */
function = 0x21000 | PAGE_DEFAULT_KEY;
size = 1UL << 20;
boundary = (start + size) & ~(size - 1);
if (boundary <= end) {
do {
start = pfmf(function, start);
} while (start < boundary);
continue;
}
}
page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
start += PAGE_SIZE;
}
}
static pte_t *walk_page_table(unsigned long addr)
{
@ -19,7 +54,7 @@ static pte_t *walk_page_table(unsigned long addr)
if (pgd_none(*pgdp))
return NULL;
pudp = pud_offset(pgdp, addr);
if (pud_none(*pudp))
if (pud_none(*pudp) || pud_large(*pudp))
return NULL;
pmdp = pmd_offset(pudp, addr);
if (pmd_none(*pmdp) || pmd_large(*pmdp))
@ -70,3 +105,46 @@ int set_memory_x(unsigned long addr, int numpages)
{
return 0;
}
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long address;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int i;
for (i = 0; i < numpages; i++) {
address = page_to_phys(page + i);
pgd = pgd_offset_k(address);
pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
if (!enable) {
__ptep_ipte(address, pte);
pte_val(*pte) = _PAGE_TYPE_EMPTY;
continue;
}
*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
}
}
#ifdef CONFIG_HIBERNATION
bool kernel_page_present(struct page *page)
{
unsigned long addr;
int cc;
addr = page_to_phys(page);
asm volatile(
" lra %1,0(%1)\n"
" ipm %0\n"
" srl %0,28"
: "=d" (cc), "+a" (addr) : : "cc");
return cc == 0;
}
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */

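storage_key_init_range() above tries the largest frame first (2GB with EDAT2, then 1MB with EDAT1, then single pages); a frame is usable when the next size-aligned boundary after start still lies within the range. A sketch of that one-line test with made-up range values:

#include <stdio.h>
#include <stdint.h>

/* next size-aligned boundary after start, as in the pfmf loops */
static uint64_t next_boundary(uint64_t start, uint64_t size)
{
        return (start + size) & ~(size - 1);
}

int main(void)
{
        uint64_t start = 0x00180000;      /* 1.5 MB, illustrative */
        uint64_t end   = 0x40000000;      /* 1 GB, illustrative */
        uint64_t frame_2g = 1ULL << 31;
        uint64_t frame_1m = 1ULL << 20;

        printf("2GB frame usable: %d\n", next_boundary(start, frame_2g) <= end);
        printf("1MB frame usable: %d\n", next_boundary(start, frame_1m) <= end);
        return 0;                         /* prints 0, then 1 */
}
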
arch/s390/mm/pgtable.c

@ -881,22 +881,6 @@ int s390_enable_sie(void)
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
unsigned long addr;
int cc;
addr = page_to_phys(page);
asm volatile(
" lra %1,0(%1)\n"
" ipm %0\n"
" srl %0,28"
: "=d" (cc), "+a" (addr) : : "cc");
return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)

arch/s390/mm/vmem.c

@ -89,6 +89,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
int ret = -ENOMEM;
while (address < end) {
pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
@ -96,18 +97,24 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
goto out;
pgd_populate(&init_mm, pg_dir, pu_dir);
}
pu_dir = pud_offset(pg_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
pte_val(pte) |= _REGION3_ENTRY_LARGE;
pte_val(pte) |= _REGION_ENTRY_TYPE_R3;
pud_val(*pu_dir) = pte_val(pte);
address += PUD_SIZE;
continue;
}
#endif
if (pud_none(*pu_dir)) {
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
pud_populate(&init_mm, pu_dir, pm_dir);
}
pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pm_dir = pmd_offset(pu_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
@ -160,6 +167,11 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
address += PUD_SIZE;
continue;
}
if (pud_large(*pu_dir)) {
pud_clear(pu_dir);
address += PUD_SIZE;
continue;
}
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
address += PMD_SIZE;
@ -193,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
start_addr = (unsigned long) start;
end_addr = (unsigned long) (start + nr);
for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
for (address = start_addr; address < end_addr;) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
@ -212,10 +224,33 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
#ifdef CONFIG_64BIT
/* Use 1MB frames for vmemmap if available. We always
* use large frames even if they are only partially
* used. Otherwise we would also end up with page
* tables, since vmemmap_populate gets called for
* each section separately. */
if (MACHINE_HAS_EDAT1) {
void *new_page;
new_page = vmemmap_alloc_block(PMD_SIZE, node);
if (!new_page)
goto out;
pte = mk_pte_phys(__pa(new_page), PAGE_RW);
pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
pmd_val(*pm_dir) = pte_val(pte);
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
#endif
pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
pmd_populate(&init_mm, pm_dir, pt_dir);
} else if (pmd_large(*pm_dir)) {
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
pt_dir = pte_offset_kernel(pm_dir, address);
@ -228,6 +263,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
*pt_dir = pte;
}
address += PAGE_SIZE;
}
memset(start, 0, nr * sizeof(struct page));
ret = 0;

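Once vmemmap_populate() above maps a 1MB frame, it advances the cursor to the next segment boundary instead of stepping page by page; the rounding is the (address + PMD_SIZE) & PMD_MASK expression from the hunk. A tiny standalone check (1MB segment size as on s390):

#include <stdio.h>
#include <stdint.h>

#define PMD_SHIFT 20                      /* 1MB segments on s390 */
#define PMD_SIZE  (1ULL << PMD_SHIFT)
#define PMD_MASK  (~(PMD_SIZE - 1))

int main(void)
{
        uint64_t address = 0x123456;      /* somewhere mid-segment */

        /* round up to the start of the next 1MB segment */
        address = (address + PMD_SIZE) & PMD_MASK;
        printf("next segment at %#llx\n", (unsigned long long)address);
        return 0;                         /* prints 0x200000 */
}
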
arch/s390/net/bpf_jit_comp.c

@ -341,6 +341,27 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
case BPF_S_ALU_MOD_X: /* A %= X */
jit->seen |= SEEN_XREG | SEEN_RET0;
/* ltr %r12,%r12 */
EMIT2(0x12cc);
/* jz <ret0> */
EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
/* lhi %r4,0 */
EMIT4(0xa7480000);
/* dr %r4,%r12 */
EMIT2(0x1d4c);
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
case BPF_S_ALU_MOD_K: /* A %= K */
/* lhi %r4,0 */
EMIT4(0xa7480000);
/* d %r4,<d(K)>(%r13) */
EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
case BPF_S_ALU_AND_X: /* A &= X */
jit->seen |= SEEN_XREG;
/* nr %r5,%r12 */
@ -368,10 +389,17 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_DISP(0x5650d000, EMIT_CONST(K));
break;
case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
case BPF_S_ALU_XOR_X:
jit->seen |= SEEN_XREG;
/* xr %r5,%r12 */
EMIT2(0x175c);
break;
case BPF_S_ALU_XOR_K: /* A ^= K */
if (!K)
break;
/* x %r5,<d(K)>(%r13) */
EMIT4_DISP(0x5750d000, EMIT_CONST(K));
break;
case BPF_S_ALU_LSH_X: /* A <<= X; */
jit->seen |= SEEN_XREG;
/* sll %r5,0(%r12) */

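The new 32-bit MOD and XOR cases above have simple value semantics. Note that for A %= X the ltr/jz pair jumps to the filter's ret0 exit when X is zero, i.e. the whole filter returns 0; the sketch below collapses that abort into a return value purely for illustration:

#include <stdio.h>
#include <stdint.h>

static uint32_t bpf_alu_mod_x(uint32_t A, uint32_t X)
{
        if (X == 0)                       /* "ltr %r12,%r12; jz <ret0>" */
                return 0;                 /* models the ret0 abort */
        return A % X;                     /* remainder of "dr %r4,%r12" */
}

static uint32_t bpf_alu_xor_k(uint32_t A, uint32_t K)
{
        if (K == 0)                       /* JIT emits nothing for K == 0 */
                return A;
        return A ^ K;                     /* "x %r5,<d(K)>(%r13)" */
}

int main(void)
{
        printf("%u %u\n", bpf_alu_mod_x(17, 5),
               bpf_alu_xor_k(0xf0f0, 0x0ff0));
        return 0;                         /* prints "2 65280" */
}
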
arch/s390/pci/Makefile (new file, 6 lines)

@ -0,0 +1,6 @@
#
# Makefile for the s390 PCI subsystem.
#
obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \
pci_sysfs.o pci_event.o

arch/s390/pci/pci.c (new file, 1103 lines; diff suppressed because it is too large)

arch/s390/pci/pci_clp.c (new file, 324 lines)

@ -0,0 +1,324 @@
/*
* Copyright IBM Corp. 2012
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/pci_clp.h>
/*
* Call Logical Processor
* Retry logic is handled by the caller.
*/
static inline u8 clp_instr(void *req)
{
u64 ilpm;
u8 cc;
asm volatile (
" .insn rrf,0xb9a00000,%[ilpm],%[req],0x0,0x2\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [ilpm] "=d" (ilpm)
: [req] "a" (req)
: "cc", "memory");
return cc;
}
static void *clp_alloc_block(void)
{
struct page *page = alloc_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
return (page) ? page_address(page) : NULL;
}
static void clp_free_block(void *ptr)
{
free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
struct clp_rsp_query_pci_grp *response)
{
zdev->tlb_refresh = response->refresh;
zdev->dma_mask = response->dasm;
zdev->msi_addr = response->msia;
pr_debug("Supported number of MSI vectors: %u\n", response->noi);
switch (response->version) {
case 1:
zdev->max_bus_speed = PCIE_SPEED_5_0GT;
break;
default:
zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
break;
}
}
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
struct clp_req_rsp_query_pci_grp *rrb;
int rc;
rrb = clp_alloc_block();
if (!rrb)
return -ENOMEM;
memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request);
rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
rrb->response.hdr.len = sizeof(rrb->response);
rrb->request.pfgid = pfgid;
rc = clp_instr(rrb);
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
clp_store_query_pci_fngrp(zdev, &rrb->response);
else {
pr_err("Query PCI FNGRP failed with response: %x cc: %d\n",
rrb->response.hdr.rsp, rc);
rc = -EIO;
}
clp_free_block(rrb);
return rc;
}
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
struct clp_rsp_query_pci *response)
{
int i;
for (i = 0; i < PCI_BAR_COUNT; i++) {
zdev->bars[i].val = le32_to_cpu(response->bar[i]);
zdev->bars[i].size = response->bar_size[i];
}
zdev->start_dma = response->sdma;
zdev->end_dma = response->edma;
zdev->pchid = response->pchid;
zdev->pfgid = response->pfgid;
return 0;
}
static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
struct clp_req_rsp_query_pci *rrb;
int rc;
rrb = clp_alloc_block();
if (!rrb)
return -ENOMEM;
memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request);
rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
rrb->response.hdr.len = sizeof(rrb->response);
rrb->request.fh = fh;
rc = clp_instr(rrb);
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
rc = clp_store_query_pci_fn(zdev, &rrb->response);
if (rc)
goto out;
if (rrb->response.pfgid)
rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
} else {
pr_err("Query PCI failed with response: %x cc: %d\n",
rrb->response.hdr.rsp, rc);
rc = -EIO;
}
out:
clp_free_block(rrb);
return rc;
}
int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
struct zpci_dev *zdev;
int rc;
zdev = zpci_alloc_device();
if (IS_ERR(zdev))
return PTR_ERR(zdev);
zdev->fh = fh;
zdev->fid = fid;
/* Query function properties and update zdev */
rc = clp_query_pci_fn(zdev, fh);
if (rc)
goto error;
if (configured)
zdev->state = ZPCI_FN_STATE_CONFIGURED;
else
zdev->state = ZPCI_FN_STATE_STANDBY;
rc = zpci_create_device(zdev);
if (rc)
goto error;
return 0;
error:
zpci_free_device(zdev);
return rc;
}
/*
* Enable/Disable a given PCI function defined by its function handle.
*/
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{
struct clp_req_rsp_set_pci *rrb;
int rc, retries = 1000;
rrb = clp_alloc_block();
if (!rrb)
return -ENOMEM;
do {
memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request);
rrb->request.hdr.cmd = CLP_SET_PCI_FN;
rrb->response.hdr.len = sizeof(rrb->response);
rrb->request.fh = *fh;
rrb->request.oc = command;
rrb->request.ndas = nr_dma_as;
rc = clp_instr(rrb);
if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
retries--;
if (retries < 0)
break;
msleep(1);
}
} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
*fh = rrb->response.fh;
else {
pr_err("Set PCI FN failed with response: %x cc: %d\n",
rrb->response.hdr.rsp, rc);
rc = -EIO;
}
clp_free_block(rrb);
return rc;
}
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
u32 fh = zdev->fh;
int rc;
rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
if (!rc)
/* Success -> store enabled handle in zdev */
zdev->fh = fh;
return rc;
}
int clp_disable_fh(struct zpci_dev *zdev)
{
u32 fh = zdev->fh;
int rc;
if (!zdev_enabled(zdev))
return 0;
dev_info(&zdev->pdev->dev, "disabling fn handle: 0x%x\n", fh);
rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
if (!rc)
/* Success -> store disabled handle in zdev */
zdev->fh = fh;
else
dev_err(&zdev->pdev->dev,
"Failed to disable fn handle: 0x%x\n", fh);
return rc;
}
static void clp_check_pcifn_entry(struct clp_fh_list_entry *entry)
{
int present, rc;
if (!entry->vendor_id)
return;
/* TODO: be a little bit more scalable */
present = zpci_fid_present(entry->fid);
if (present)
pr_debug("%s: device %x already present\n", __func__, entry->fid);
/* skip already used functions */
if (present && entry->config_state)
return;
/* aev 306: function moved to stand-by state */
if (present && !entry->config_state) {
/*
* The handle is already disabled, that means no iota/irq freeing via
* the firmware interfaces anymore. Need to free resources manually
* (DMA memory, debug, sysfs)...
*/
zpci_stop_device(get_zdev_by_fid(entry->fid));
return;
}
rc = clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
if (rc)
pr_err("Failed to add fid: 0x%x\n", entry->fid);
}
int clp_find_pci_devices(void)
{
struct clp_req_rsp_list_pci *rrb;
u64 resume_token = 0;
int entries, i, rc;
rrb = clp_alloc_block();
if (!rrb)
return -ENOMEM;
do {
memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request);
rrb->request.hdr.cmd = CLP_LIST_PCI;
/* store as many entries as possible */
rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
rrb->request.resume_token = resume_token;
/* Get PCI function handle list */
rc = clp_instr(rrb);
if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
pr_err("List PCI failed with response: 0x%x cc: %d\n",
rrb->response.hdr.rsp, rc);
rc = -EIO;
goto out;
}
WARN_ON_ONCE(rrb->response.entry_size !=
sizeof(struct clp_fh_list_entry));
entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
rrb->response.entry_size;
pr_info("Detected number of PCI functions: %u\n", entries);
/* Store the returned resume token as input for the next call */
resume_token = rrb->response.resume_token;
for (i = 0; i < entries; i++)
clp_check_pcifn_entry(&rrb->response.fh_list[i]);
} while (resume_token);
pr_debug("Maximum number of supported PCI functions: %u\n",
rrb->response.max_fn);
out:
clp_free_block(rrb);
return rc;
}

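clp_find_pci_devices() pages through the function list with a resume token: each response carries the token to feed into the next request, and a zero token terminates the loop. A generic sketch of the pattern with a stubbed instruction (fake_list_pci and its numbers are invented):

#include <stdio.h>
#include <stdint.h>

struct list_rsp {
        uint64_t resume_token;            /* 0 means: no more entries */
        int entries;
};

/* stand-in for clp_instr(); pretends there are two result pages */
static int fake_list_pci(uint64_t token, struct list_rsp *rsp)
{
        rsp->entries = 2;
        rsp->resume_token = (token == 0) ? 42 : 0;
        return 0;
}

int main(void)
{
        struct list_rsp rsp;
        uint64_t resume_token = 0;
        int total = 0;

        do {
                if (fake_list_pci(resume_token, &rsp))
                        break;
                total += rsp.entries;
                resume_token = rsp.resume_token;  /* feed back */
        } while (resume_token);

        printf("found %d entries\n", total);      /* prints 4 */
        return 0;
}
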
arch/s390/pci/pci_dma.c (new file, 506 lines)

@ -0,0 +1,506 @@
/*
* Copyright IBM Corp. 2012
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>
static enum zpci_ioat_dtype zpci_ioat_dt = ZPCI_IOTA_RTTO;
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static unsigned long *dma_alloc_cpu_table(void)
{
unsigned long *table, *entry;
table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
if (!table)
return NULL;
for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
return table;
}
static void dma_free_cpu_table(void *table)
{
kmem_cache_free(dma_region_table_cache, table);
}
static unsigned long *dma_alloc_page_table(void)
{
unsigned long *table, *entry;
table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
if (!table)
return NULL;
for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
return table;
}
static void dma_free_page_table(void *table)
{
kmem_cache_free(dma_page_table_cache, table);
}
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
unsigned long *sto;
if (reg_entry_isvalid(*entry))
sto = get_rt_sto(*entry);
else {
sto = dma_alloc_cpu_table();
if (!sto)
return NULL;
set_rt_sto(entry, sto);
validate_rt_entry(entry);
entry_clr_protected(entry);
}
return sto;
}
static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
unsigned long *pto;
if (reg_entry_isvalid(*entry))
pto = get_st_pto(*entry);
else {
pto = dma_alloc_page_table();
if (!pto)
return NULL;
set_st_pto(entry, pto);
validate_st_entry(entry);
entry_clr_protected(entry);
}
return pto;
}
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
unsigned long *sto, *pto;
unsigned int rtx, sx, px;
rtx = calc_rtx(dma_addr);
sto = dma_get_seg_table_origin(&rto[rtx]);
if (!sto)
return NULL;
sx = calc_sx(dma_addr);
pto = dma_get_page_table_origin(&sto[sx]);
if (!pto)
return NULL;
px = calc_px(dma_addr);
return &pto[px];
}
static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
dma_addr_t dma_addr, int flags)
{
unsigned long *entry;
entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
if (!entry) {
WARN_ON_ONCE(1);
return;
}
if (flags & ZPCI_PTE_INVALID) {
invalidate_pt_entry(entry);
return;
} else {
set_pt_pfaa(entry, page_addr);
validate_pt_entry(entry);
}
if (flags & ZPCI_TABLE_PROTECTED)
entry_set_protected(entry);
else
entry_clr_protected(entry);
}
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
dma_addr_t dma_addr, size_t size, int flags)
{
unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
u8 *page_addr = (u8 *) (pa & PAGE_MASK);
dma_addr_t start_dma_addr = dma_addr;
unsigned long irq_flags;
int i, rc = 0;
if (!nr_pages)
return -EINVAL;
spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
if (!zdev->dma_table) {
dev_err(&zdev->pdev->dev, "Missing DMA table\n");
goto no_refresh;
}
for (i = 0; i < nr_pages; i++) {
dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
page_addr += PAGE_SIZE;
dma_addr += PAGE_SIZE;
}
/*
* rpcit is not required to establish new translations when previously
* invalid translation-table entries are validated, however it is
* required when altering previously valid entries.
*/
if (!zdev->tlb_refresh &&
((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
/*
* TODO: also need to check that the old entry is indeed INVALID
* and not only for one page but for the whole range...
* -> now we WARN_ON in that case but with lazy unmap that
* needs to be redone!
*/
goto no_refresh;
rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
nr_pages * PAGE_SIZE);
no_refresh:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
return rc;
}
static void dma_free_seg_table(unsigned long entry)
{
unsigned long *sto = get_rt_sto(entry);
int sx;
for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
if (reg_entry_isvalid(sto[sx]))
dma_free_page_table(get_st_pto(sto[sx]));
dma_free_cpu_table(sto);
}
static void dma_cleanup_tables(struct zpci_dev *zdev)
{
unsigned long *table;
int rtx;
if (!zdev || !zdev->dma_table)
return;
table = zdev->dma_table;
for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
if (reg_entry_isvalid(table[rtx]))
dma_free_seg_table(table[rtx]);
dma_free_cpu_table(table);
zdev->dma_table = NULL;
}
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
int size)
{
unsigned long boundary_size = 0x1000000;
return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
start, size, 0, boundary_size, 0);
}
static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
unsigned long offset, flags;
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
if (offset == -1)
offset = __dma_alloc_iommu(zdev, 0, size);
if (offset != -1) {
zdev->next_bit = offset + size;
if (zdev->next_bit >= zdev->iommu_pages)
zdev->next_bit = 0;
}
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
return offset;
}
static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
unsigned long flags;
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
if (!zdev->iommu_bitmap)
goto out;
bitmap_clear(zdev->iommu_bitmap, offset, size);
if (offset >= zdev->next_bit)
zdev->next_bit = offset + size;
out:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}
int dma_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
unsigned long nr_pages, iommu_page_index;
unsigned long pa = page_to_phys(page) + offset;
int flags = ZPCI_PTE_VALID;
dma_addr_t dma_addr;
WARN_ON_ONCE(offset > PAGE_SIZE);
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
if (iommu_page_index == -1)
goto out_err;
/* Use rounded up size */
size = nr_pages * PAGE_SIZE;
dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
if (dma_addr + size > zdev->end_dma) {
dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
dma_addr, size, zdev->end_dma);
goto out_free;
}
if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
flags |= ZPCI_TABLE_PROTECTED;
if (!dma_update_trans(zdev, pa, dma_addr, size, flags))
return dma_addr + offset;
out_free:
dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
dev_err(dev, "Failed to map addr: %lx\n", pa);
return DMA_ERROR_CODE;
}
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
unsigned long iommu_page_index;
int npages;
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr = dma_addr & PAGE_MASK;
if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);
iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
dma_free_iommu(zdev, iommu_page_index, npages);
}
static void *s390_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct page *page;
unsigned long pa;
dma_addr_t map;
size = PAGE_ALIGN(size);
page = alloc_pages(flag, get_order(size));
if (!page)
return NULL;
pa = page_to_phys(page);
memset((void *) pa, 0, size);
map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
size, DMA_BIDIRECTIONAL, NULL);
if (dma_mapping_error(dev, map)) {
free_pages(pa, get_order(size));
return NULL;
}
if (dma_handle)
*dma_handle = map;
return (void *) pa;
}
static void s390_dma_free(struct device *dev, size_t size,
void *pa, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long) pa, get_order(size));
}
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nr_elements, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
int mapped_elements = 0;
struct scatterlist *s;
int i;
for_each_sg(sg, s, nr_elements, i) {
struct page *page = sg_page(s);
s->dma_address = s390_dma_map_pages(dev, page, s->offset,
s->length, dir, NULL);
if (!dma_mapping_error(dev, s->dma_address)) {
s->dma_length = s->length;
mapped_elements++;
} else
goto unmap;
}
out:
return mapped_elements;
unmap:
for_each_sg(sg, s, mapped_elements, i) {
if (s->dma_address)
s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
dir, NULL);
s->dma_address = 0;
s->dma_length = 0;
}
mapped_elements = 0;
goto out;
}
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nr_elements, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nr_elements, i) {
s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
s->dma_address = 0;
s->dma_length = 0;
}
}
int zpci_dma_init_device(struct zpci_dev *zdev)
{
unsigned int bitmap_order;
int rc;
spin_lock_init(&zdev->iommu_bitmap_lock);
spin_lock_init(&zdev->dma_table_lock);
zdev->dma_table = dma_alloc_cpu_table();
if (!zdev->dma_table) {
rc = -ENOMEM;
goto out_clean;
}
zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
bitmap_order = get_order(zdev->iommu_pages / 8);
pr_info("iommu_size: 0x%lx iommu_pages: 0x%lx bitmap_order: %i\n",
zdev->iommu_size, zdev->iommu_pages, bitmap_order);
zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
bitmap_order);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
goto out_reg;
}
rc = zpci_register_ioat(zdev,
0,
zdev->start_dma + PAGE_OFFSET,
zdev->start_dma + zdev->iommu_size - 1,
(u64) zdev->dma_table);
if (rc)
goto out_reg;
return 0;
out_reg:
dma_free_cpu_table(zdev->dma_table);
out_clean:
return rc;
}
void zpci_dma_exit_device(struct zpci_dev *zdev)
{
zpci_unregister_ioat(zdev, 0);
dma_cleanup_tables(zdev);
free_pages((unsigned long) zdev->iommu_bitmap,
get_order(zdev->iommu_pages / 8));
zdev->iommu_bitmap = NULL;
zdev->next_bit = 0;
}
static int __init dma_alloc_cpu_table_caches(void)
{
dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
0, NULL);
if (!dma_region_table_cache)
return -ENOMEM;
dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
0, NULL);
if (!dma_page_table_cache) {
kmem_cache_destroy(dma_region_table_cache);
return -ENOMEM;
}
return 0;
}
int __init zpci_dma_init(void)
{
return dma_alloc_cpu_table_caches();
}
void zpci_dma_exit(void)
{
kmem_cache_destroy(dma_page_table_cache);
kmem_cache_destroy(dma_region_table_cache);
}
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
static int __init dma_debug_do_init(void)
{
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
return 0;
}
fs_initcall(dma_debug_do_init);
struct dma_map_ops s390_dma_ops = {
.alloc = s390_dma_alloc,
.free = s390_dma_free,
.map_sg = s390_dma_map_sg,
.unmap_sg = s390_dma_unmap_sg,
.map_page = s390_dma_map_pages,
.unmap_page = s390_dma_unmap_pages,
/* if we support direct DMA this must be conditional */
.is_phys = 0,
/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);

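dma_walk_cpu_trans() above splits a DMA address into region-table, segment-table and page-table indices (calc_rtx/calc_sx/calc_px). The shift and mask values below are assumptions modeled on asm/pci_dma.h (4KB pages, 256-entry page tables, 2048-entry region and segment tables); the sketch shows only the index decomposition:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PT_BITS    8                      /* 256 page-table entries */
#define TABLE_BITS 11                     /* 2048 region/segment entries */
#define ST_SHIFT   (PT_BITS + PAGE_SHIFT) /* 20 */
#define RT_SHIFT   (ST_SHIFT + TABLE_BITS)/* 31 */

int main(void)
{
        uint64_t dma = 0x123456789abcULL;

        printf("rtx=%#llx sx=%#llx px=%#llx\n",
               (unsigned long long)((dma >> RT_SHIFT) & 0x7ff),
               (unsigned long long)((dma >> ST_SHIFT) & 0x7ff),
               (unsigned long long)((dma >> PAGE_SHIFT) & 0xff));
        return 0;                         /* rtx=0x468 sx=0x567 px=0x89 */
}
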
arch/s390/pci/pci_event.c (new file, 93 lines)

@ -0,0 +1,93 @@
/*
* Copyright IBM Corp. 2012
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/pci.h>
/* Content Code Description for PCI Function Error */
struct zpci_ccdf_err {
u32 reserved1;
u32 fh; /* function handle */
u32 fid; /* function id */
u32 ett : 4; /* expected table type */
u32 mvn : 12; /* MSI vector number */
u32 dmaas : 8; /* DMA address space */
u32 : 6;
u32 q : 1; /* event qualifier */
u32 rw : 1; /* read/write */
u64 faddr; /* failing address */
u32 reserved3;
u16 reserved4;
u16 pec; /* PCI event code */
} __packed;
/* Content Code Description for PCI Function Availability */
struct zpci_ccdf_avail {
u32 reserved1;
u32 fh; /* function handle */
u32 fid; /* function id */
u32 reserved2;
u32 reserved3;
u32 reserved4;
u32 reserved5;
u16 reserved6;
u16 pec; /* PCI event code */
} __packed;
static void zpci_event_log_err(struct zpci_ccdf_err *ccdf)
{
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
dev_err(&zdev->pdev->dev, "event code: 0x%x\n", ccdf->pec);
}
static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
{
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
pr_err("%s%s: availability event: fh: 0x%x fid: 0x%x event code: 0x%x reason:",
(zdev) ? dev_driver_string(&zdev->pdev->dev) : "?",
(zdev) ? dev_name(&zdev->pdev->dev) : "?",
ccdf->fh, ccdf->fid, ccdf->pec);
print_hex_dump(KERN_CONT, "ccdf", DUMP_PREFIX_OFFSET,
16, 1, ccdf, sizeof(*ccdf), false);
switch (ccdf->pec) {
case 0x0301:
zpci_enable_device(zdev);
break;
case 0x0302:
clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
break;
case 0x0306:
clp_find_pci_devices();
break;
default:
break;
}
}
void zpci_event_error(void *data)
{
struct zpci_ccdf_err *ccdf = data;
struct zpci_dev *zdev;
zpci_event_log_err(ccdf);
zdev = get_zdev_by_fid(ccdf->fid);
if (!zdev) {
pr_err("Error event for unknown fid: %x", ccdf->fid);
return;
}
}
void zpci_event_availability(void *data)
{
zpci_event_log_avail(data);
}

arch/s390/pci/pci_msi.c (new file, 141 lines)

@ -0,0 +1,141 @@
/*
* Copyright IBM Corp. 2012
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <asm/hw_irq.h>
/* mapping of irq numbers to msi_desc */
static struct hlist_head *msi_hash;
static unsigned int msihash_shift = 6;
#define msi_hashfn(nr) hash_long(nr, msihash_shift)
static DEFINE_SPINLOCK(msi_map_lock);
struct msi_desc *__irq_get_msi_desc(unsigned int irq)
{
struct hlist_node *entry;
struct msi_map *map;
hlist_for_each_entry_rcu(map, entry,
&msi_hash[msi_hashfn(irq)], msi_chain)
if (map->irq == irq)
return map->msi;
return NULL;
}
int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
{
if (msi->msi_attrib.is_msix) {
int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL;
msi->masked = readl(msi->mask_base + offset);
writel(flag, msi->mask_base + offset);
} else {
if (msi->msi_attrib.maskbit) {
int pos;
u32 mask_bits;
pos = (long) msi->mask_base;
pci_read_config_dword(msi->dev, pos, &mask_bits);
mask_bits &= ~(mask);
mask_bits |= flag & mask;
pci_write_config_dword(msi->dev, pos, mask_bits);
} else {
return 0;
}
}
msi->msi_attrib.maskbit = !!flag;
return 1;
}
int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
unsigned int nr, int offset)
{
struct msi_map *map;
struct msi_msg msg;
int rc;
map = kmalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL)
return -ENOMEM;
map->irq = nr;
map->msi = msi;
zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
__func__, nr, msi_hashfn(nr));
hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]);
spin_lock(&msi_map_lock);
rc = irq_set_msi_desc(nr, msi);
if (rc) {
spin_unlock(&msi_map_lock);
hlist_del_rcu(&map->msi_chain);
kfree(map);
zdev->msi_map[nr & ZPCI_MSI_MASK] = NULL;
return rc;
}
spin_unlock(&msi_map_lock);
msg.data = nr - offset;
msg.address_lo = zdev->msi_addr & 0xffffffff;
msg.address_hi = zdev->msi_addr >> 32;
write_msi_msg(nr, &msg);
return 0;
}
void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi)
{
int irq = msi->irq & ZPCI_MSI_MASK;
struct msi_map *map;
msi->msg.address_lo = 0;
msi->msg.address_hi = 0;
msi->msg.data = 0;
msi->irq = 0;
zpci_msi_set_mask_bits(msi, 1, 1);
spin_lock(&msi_map_lock);
map = zdev->msi_map[irq];
hlist_del_rcu(&map->msi_chain);
kfree(map);
zdev->msi_map[irq] = NULL;
spin_unlock(&msi_map_lock);
}
/*
* The msi hash table has 256 entries which is good for 4..20
* devices (a typical device allocates 10 + CPUs MSIs). Maybe make
* the hash table size adjustable later.
*/
int __init zpci_msihash_init(void)
{
unsigned int i;
msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL);
if (!msi_hash)
return -ENOMEM;
for (i = 0; i < (1U << msihash_shift); i++)
INIT_HLIST_HEAD(&msi_hash[i]);
return 0;
}
void __init zpci_msihash_exit(void)
{
kfree(msi_hash);
}

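__irq_get_msi_desc() above is a chained hash lookup: hash the irq number into a bucket, then scan that bucket's list for a matching entry. A toy single-threaded version (the kernel uses hlist heads with RCU and hash_long(); the modulo hash and string payload here are simplifications):

#include <stdio.h>

#define MSIHASH_SHIFT 6
#define BUCKETS       (1U << MSIHASH_SHIFT)

struct msi_map {
        unsigned int irq;
        const char *desc;                 /* stands in for struct msi_desc * */
        struct msi_map *next;
};

static struct msi_map *msi_hash[BUCKETS];

static unsigned int msi_hashfn(unsigned int nr)
{
        return nr % BUCKETS;              /* kernel: hash_long(nr, shift) */
}

static void msi_insert(struct msi_map *map)
{
        unsigned int b = msi_hashfn(map->irq);

        map->next = msi_hash[b];
        msi_hash[b] = map;
}

static const char *msi_lookup(unsigned int irq)
{
        struct msi_map *m;

        for (m = msi_hash[msi_hashfn(irq)]; m; m = m->next)
                if (m->irq == irq)
                        return m->desc;
        return NULL;
}

int main(void)
{
        struct msi_map a = { 17, "desc-17", NULL };

        msi_insert(&a);
        printf("%s\n", msi_lookup(17));   /* prints "desc-17" */
        return 0;
}
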
arch/s390/pci/pci_sysfs.c (new file, 86 lines)

@ -0,0 +1,86 @@
/*
* Copyright IBM Corp. 2012
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/pci.h>
static ssize_t show_fid(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
sprintf(buf, "0x%08x\n", zdev->fid);
return strlen(buf);
}
static DEVICE_ATTR(function_id, S_IRUGO, show_fid, NULL);
static ssize_t show_fh(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
sprintf(buf, "0x%08x\n", zdev->fh);
return strlen(buf);
}
static DEVICE_ATTR(function_handle, S_IRUGO, show_fh, NULL);
static ssize_t show_pchid(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
sprintf(buf, "0x%04x\n", zdev->pchid);
return strlen(buf);
}
static DEVICE_ATTR(pchid, S_IRUGO, show_pchid, NULL);
static ssize_t show_pfgid(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
sprintf(buf, "0x%02x\n", zdev->pfgid);
return strlen(buf);
}
static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL);
static struct device_attribute *zpci_dev_attrs[] = {
&dev_attr_function_id,
&dev_attr_function_handle,
&dev_attr_pchid,
&dev_attr_pfgid,
NULL,
};
int zpci_sysfs_add_device(struct device *dev)
{
int i, rc = 0;
for (i = 0; zpci_dev_attrs[i]; i++) {
rc = device_create_file(dev, zpci_dev_attrs[i]);
if (rc)
goto error;
}
return 0;
error:
while (--i >= 0)
device_remove_file(dev, zpci_dev_attrs[i]);
return rc;
}
void zpci_sysfs_remove_device(struct device *dev)
{
int i;
for (i = 0; zpci_dev_attrs[i]; i++)
device_remove_file(dev, zpci_dev_attrs[i]);
}

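zpci_sysfs_add_device() above uses the usual create-with-rollback idiom: on the first failure, undo everything created so far in reverse order with while (--i >= 0). Reduced to plain C with stub functions (the failure point is chosen arbitrarily):

#include <stdio.h>

static int create(int i)
{
        if (i == 2)
                return -1;                /* simulate failure on the third */
        printf("created %d\n", i);
        return 0;
}

static void destroy(int i)
{
        printf("removed %d\n", i);
}

int main(void)
{
        int i, rc = 0;

        for (i = 0; i < 4; i++) {
                rc = create(i);
                if (rc)
                        goto error;
        }
        return 0;
error:
        while (--i >= 0)                  /* same rollback as the sysfs code */
                destroy(i);
        return rc;
}
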
drivers/gpu/vga/Kconfig

@ -1,7 +1,7 @@
config VGA_ARB
bool "VGA Arbitration" if EXPERT
default y
depends on PCI
depends on (PCI && !S390)
help
Some "legacy" VGA devices implemented on PCI typically have the same
hard-decoded addresses as they did on ISA. When multiple PCI devices

drivers/pci/hotplug/Kconfig

@ -151,4 +151,15 @@ config HOTPLUG_PCI_SGI
When in doubt, say N.
config HOTPLUG_PCI_S390
tristate "System z PCI Hotplug Support"
depends on S390 && 64BIT
help
Say Y here if you want to use the System z PCI Hotplug
driver for PCI devices. Without this driver it is not
possible to access stand-by PCI functions nor to deconfigure
PCI functions.
When in doubt, say Y.
endif # HOTPLUG_PCI

drivers/pci/hotplug/Makefile

@ -18,6 +18,7 @@ obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o
obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o
obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o
obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o
# acpiphp_ibm extends acpiphp, so should be linked afterwards.

drivers/pci/hotplug/s390_pci_hpc.c (new file, 252 lines)

@ -0,0 +1,252 @@
/*
* PCI Hot Plug Controller Driver for System z
*
* Copyright 2012 IBM Corp.
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#define COMPONENT "zPCI hpc"
#define pr_fmt(fmt) COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/init.h>
#include <asm/sclp.h>
#define SLOT_NAME_SIZE 10
static LIST_HEAD(s390_hotplug_slot_list);
MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("Hot Plug PCI Controller for System z");
MODULE_LICENSE("GPL");
static int zpci_fn_configured(enum zpci_state state)
{
return state == ZPCI_FN_STATE_CONFIGURED ||
state == ZPCI_FN_STATE_ONLINE;
}
/*
* struct slot - slot information for each *physical* slot
*/
struct slot {
struct list_head slot_list;
struct hotplug_slot *hotplug_slot;
struct zpci_dev *zdev;
};
static int enable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
int rc;
if (slot->zdev->state != ZPCI_FN_STATE_STANDBY)
return -EIO;
rc = sclp_pci_configure(slot->zdev->fid);
if (!rc) {
slot->zdev->state = ZPCI_FN_STATE_CONFIGURED;
/* automatically scan the device after it was configured */
zpci_enable_device(slot->zdev);
zpci_scan_device(slot->zdev);
}
return rc;
}
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
int rc;
if (!zpci_fn_configured(slot->zdev->state))
return -EIO;
/* TODO: we rely on the user to unbind/remove the device, is that plausible
* or do we need to trigger that here?
*/
rc = sclp_pci_deconfigure(slot->zdev->fid);
if (!rc) {
/* Fixme: better call List-PCI to find the disabled FH
for the FID since the FH should be opaque... */
slot->zdev->fh &= 0x7fffffff;
slot->zdev->state = ZPCI_FN_STATE_STANDBY;
}
return rc;
}
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
switch (slot->zdev->state) {
case ZPCI_FN_STATE_STANDBY:
*value = 0;
break;
default:
*value = 1;
break;
}
return 0;
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
/* if the slot exists it always contains a function */
*value = 1;
return 0;
}
static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
pr_debug("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
kfree(slot);
}
static struct hotplug_slot_ops s390_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
.get_adapter_status = get_adapter_status,
};
static int init_pci_slot(struct zpci_dev *zdev)
{
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
struct slot *slot;
int rc;
if (!zdev)
return 0;
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
goto error;
hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
if (!hotplug_slot)
goto error_hp;
hotplug_slot->private = slot;
slot->hotplug_slot = hotplug_slot;
slot->zdev = zdev;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
goto error_info;
hotplug_slot->info = info;
hotplug_slot->ops = &s390_hotplug_slot_ops;
hotplug_slot->release = &release_slot;
get_power_status(hotplug_slot, &info->power_status);
get_adapter_status(hotplug_slot, &info->adapter_status);
snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
ZPCI_DEVFN, name);
if (rc) {
pr_err("pci_hp_register failed with error %d\n", rc);
goto error_reg;
}
list_add(&slot->slot_list, &s390_hotplug_slot_list);
return 0;
error_reg:
kfree(info);
error_info:
kfree(hotplug_slot);
error_hp:
kfree(slot);
error:
return -ENOMEM;
}
static int __init init_pci_slots(void)
{
struct zpci_dev *zdev;
int device = 0;
/*
* Create a structure for each slot, and register that slot
* with the pci_hotplug subsystem.
*/
mutex_lock(&zpci_list_lock);
list_for_each_entry(zdev, &zpci_list, entry) {
init_pci_slot(zdev);
device++;
}
mutex_unlock(&zpci_list_lock);
return (device) ? 0 : -ENODEV;
}
static void exit_pci_slot(struct zpci_dev *zdev)
{
struct list_head *tmp, *n;
struct slot *slot;
list_for_each_safe(tmp, n, &s390_hotplug_slot_list) {
slot = list_entry(tmp, struct slot, slot_list);
if (slot->zdev != zdev)
continue;
list_del(&slot->slot_list);
pci_hp_deregister(slot->hotplug_slot);
}
}
static void __exit exit_pci_slots(void)
{
struct list_head *tmp, *n;
struct slot *slot;
/*
* Unregister all of our slots with the pci_hotplug subsystem.
* Memory will be freed in release_slot() callback after slot's
* lifespan is finished.
*/
list_for_each_safe(tmp, n, &s390_hotplug_slot_list) {
slot = list_entry(tmp, struct slot, slot_list);
list_del(&slot->slot_list);
pci_hp_deregister(slot->hotplug_slot);
}
}
static int __init pci_hotplug_s390_init(void)
{
/*
* Do specific initialization stuff for your driver here
* like initializing your controller hardware (if any) and
* determining the number of slots you have in the system
* right now.
*/
if (!pci_probe)
return -EOPNOTSUPP;
/* register callbacks for slot handling from arch code */
mutex_lock(&zpci_list_lock);
hotplug_ops.create_slot = init_pci_slot;
hotplug_ops.remove_slot = exit_pci_slot;
mutex_unlock(&zpci_list_lock);
pr_info("registered hotplug slot callbacks\n");
return init_pci_slots();
}
static void __exit pci_hotplug_s390_exit(void)
{
exit_pci_slots();
}
module_init(pci_hotplug_s390_init);
module_exit(pci_hotplug_s390_exit);
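
enable_slot() and disable_slot() are not called directly; the pci_hotplug core invokes them when userspace writes the generic per-slot power attribute. A hedged usage sketch, with the slot name "00000012" standing in for a real function ID (init_pci_slot() formats the FID as %08x):

#include <stdio.h>

/* Returns 0 on success, -1 on error. */
static int set_slot_power(const char *slot, int on)
{
	char path[96];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/bus/pci/slots/%s/power", slot);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", on ? 1 : 0);
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	/* "1" ends up in enable_slot(): configure via SCLP, then scan. */
	return set_slot_power("00000012", 1) ? 1 : 0;
}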

View File

@ -207,6 +207,8 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
desc->masked = __msix_mask_irq(desc, flag);
}
#ifdef CONFIG_GENERIC_HARDIRQS
static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
struct msi_desc *desc = irq_data_get_msi(data);
@ -230,6 +232,8 @@ void unmask_msi_irq(struct irq_data *data)
msi_set_mask_bit(data, 0);
}
#endif /* CONFIG_GENERIC_HARDIRQS */
void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
BUG_ON(entry->dev->current_state != PCI_D0);
@ -337,8 +341,10 @@ static void free_msi_irqs(struct pci_dev *dev)
if (!entry->irq)
continue;
nvec = 1 << entry->msi_attrib.multiple;
#ifdef CONFIG_GENERIC_HARDIRQS
for (i = 0; i < nvec; i++)
BUG_ON(irq_has_action(entry->irq + i));
#endif
}
arch_teardown_msi_irqs(dev);

View File

@ -349,6 +349,16 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
return rc;
}
static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
if (device->block)
return list_empty(&device->ccw_queue) &&
list_empty(&device->block->ccw_queue);
else
return list_empty(&device->ccw_queue);
}
/*
* Remove device from block device layer. Destroy dirty buffers.
* Forget format information. Check if the target level is basic
@ -1841,6 +1851,13 @@ static void __dasd_device_check_expire(struct dasd_device *device)
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
(time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/*
* IO in safe offline processing should not
* run out of retries
*/
cqr->retries++;
}
if (device->discipline->term_IO(cqr) != 0) {
/* Hmpf, try again in 5 sec */
dev_err(&device->cdev->dev,
@ -3024,11 +3041,11 @@ void dasd_generic_remove(struct ccw_device *cdev)
cdev->handler = NULL;
dasd_remove_sysfs_files(cdev);
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
return;
if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/* Already doing offline processing */
dasd_put_device(device);
return;
@ -3048,6 +3065,8 @@ void dasd_generic_remove(struct ccw_device *cdev)
*/
if (block)
dasd_free_block(block);
dasd_remove_sysfs_files(cdev);
}
/*
@ -3126,16 +3145,13 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
{
struct dasd_device *device;
struct dasd_block *block;
int max_count, open_count;
int max_count, open_count, rc;
rc = 0;
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
return PTR_ERR(device);
if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
/* Already doing offline processing */
dasd_put_device(device);
return 0;
}
/*
* We must make sure that this device is currently not in use.
* The open_count is increased for every opener, that includes
@ -3159,6 +3175,54 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
return -EBUSY;
}
}
if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/*
* safe offline already running; this path can only be entered by
* a normal offline request, so the safe_offline flag needs to be
* removed to run normal offline and kill all I/O
*/
if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
/* Already doing normal offline processing */
dasd_put_device(device);
return -EBUSY;
} else
clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
} else
if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
/* Already doing offline processing */
dasd_put_device(device);
return -EBUSY;
}
/*
* If safe_offline was called, set the safe_offline_running flag and
* clear safe_offline so that a subsequent call to normal offline
* can override the safe offline processing
*/
if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
!test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/*
* If we want to set the device safe offline, all IO operations
* should be finished before continuing the offline process,
* so sync the bdev first and then wait for our queues to become
* empty
*/
/* sync blockdev and partitions */
rc = fsync_bdev(device->block->bdev);
if (rc != 0)
goto interrupted;
/* schedule device tasklet and wait for completion */
dasd_schedule_device_bh(device);
rc = wait_event_interruptible(shutdown_waitq,
_wait_for_empty_queues(device));
if (rc != 0)
goto interrupted;
}
set_bit(DASD_FLAG_OFFLINE, &device->flags);
dasd_set_target_state(device, DASD_STATE_NEW);
/* dasd_delete_device destroys the device reference. */
block = device->block;
@ -3170,6 +3234,14 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
if (block)
dasd_free_block(block);
return 0;
interrupted:
/* interrupted by signal */
clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
clear_bit(DASD_FLAG_OFFLINE, &device->flags);
dasd_put_device(device);
return rc;
}
int dasd_generic_last_path_gone(struct dasd_device *device)
@ -3489,15 +3561,6 @@ char *dasd_get_sense(struct irb *irb)
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
static inline int _wait_for_empty_queues(struct dasd_device *device)
{
if (device->block)
return list_empty(&device->ccw_queue) &&
list_empty(&device->block->ccw_queue);
else
return list_empty(&device->ccw_queue);
}
void dasd_generic_shutdown(struct ccw_device *cdev)
{
struct dasd_device *device;

View File

@ -951,6 +951,39 @@ dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
dasd_use_raw_store);
static ssize_t
dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct dasd_device *device;
int rc;
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device)) {
rc = PTR_ERR(device);
goto out;
}
if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/* Already doing offline processing */
dasd_put_device(device);
rc = -EBUSY;
goto out;
}
set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
dasd_put_device(device);
rc = ccw_device_set_offline(cdev);
out:
return rc ? rc : count;
}
static DEVICE_ATTR(safe_offline, 0200, NULL, dasd_safe_offline_store);
static ssize_t
dasd_discipline_show(struct device *dev, struct device_attribute *attr,
char *buf)
@ -1320,6 +1353,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_expires.attr,
&dev_attr_reservation_policy.attr,
&dev_attr_last_known_reservation_state.attr,
&dev_attr_safe_offline.attr,
NULL,
};
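
safe_offline is write-only (mode 0200, no show routine); writing any value requests the safe variant of offline processing. A sketch (not part of this patch), with "0.0.4711" as a placeholder ccw bus ID:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/ccw/devices/0.0.4711/safe_offline", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* sets DASD_FLAG_SAFE_OFFLINE, then offline */
	return fclose(f) ? 1 : 0;
}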

View File

@ -1026,7 +1026,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
{
void *conf_data;
int conf_len, conf_data_saved;
int rc;
int rc, path_err;
__u8 lpm, opm;
struct dasd_eckd_private *private, path_private;
struct dasd_path *path_data;
@ -1037,6 +1037,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
path_data = &device->path_data;
opm = ccw_device_get_path_mask(device->cdev);
conf_data_saved = 0;
path_err = 0;
/* get configuration data per operational path */
for (lpm = 0x80; lpm; lpm>>= 1) {
if (!(lpm & opm))
@ -1122,7 +1123,8 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
"the same device, path %02X leads to "
"device %s instead of %s\n", lpm,
print_path_uid, print_device_uid);
return -EINVAL;
path_err = -EINVAL;
continue;
}
path_private.conf_data = NULL;
@ -1142,7 +1144,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
kfree(conf_data);
}
return 0;
return path_err;
}
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
@ -3847,7 +3849,7 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
len = 0;
while (from <= to) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
from, ((int *) from)[0], ((int *) from)[1]);
@ -3908,23 +3910,23 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
return;
}
/* dump the sense data */
len = sprintf(page, KERN_ERR PRINTK_HEADER
len = sprintf(page, PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
"CS:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
req ? req->intrc : 0);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
@ -3937,23 +3939,23 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
if (irb->ecw[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
sprintf(page + len, KERN_ERR PRINTK_HEADER
sprintf(page + len, PRINTK_HEADER
" 24 Byte: %x MSG %x, "
"%s MSGb to SYSOP\n",
irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
irb->ecw[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
sprintf(page + len, KERN_ERR PRINTK_HEADER
sprintf(page + len, PRINTK_HEADER
" 32 Byte: Format: %x "
"Exception class %x\n",
irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
}
} else {
sprintf(page + len, KERN_ERR PRINTK_HEADER
sprintf(page + len, PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
printk("%s", page);
printk(KERN_ERR "%s", page);
if (req) {
/* req == NULL for unsolicited interrupts */
@ -3962,10 +3964,10 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
first = req->cpaddr;
for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
to = min(first + 6, last);
len = sprintf(page, KERN_ERR PRINTK_HEADER
len = sprintf(page, PRINTK_HEADER
" Related CP in req: %p\n", req);
dasd_eckd_dump_ccw_range(first, to, page + len);
printk("%s", page);
printk(KERN_ERR "%s", page);
/* print failing CCW area (maximum 4) */
/* scsw->cda is either valid or zero */
@ -3975,7 +3977,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
irb->scsw.cmd.cpa; /* failing CCW */
if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */
len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
len += sprintf(page, PRINTK_HEADER "......\n");
}
to = min(fail + 1, last);
len += dasd_eckd_dump_ccw_range(from, to, page + len);
@ -3984,11 +3986,11 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
from = max(from, ++to);
if (from < last - 1) {
from = last - 1; /* there is a gap - print header */
len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
len += sprintf(page + len, PRINTK_HEADER "......\n");
}
len += dasd_eckd_dump_ccw_range(from, last, page + len);
if (len > 0)
printk("%s", page);
printk(KERN_ERR "%s", page);
}
free_page((unsigned long) page);
}
@ -4012,10 +4014,10 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
return;
}
/* dump the sense data */
len = sprintf(page, KERN_ERR PRINTK_HEADER
len = sprintf(page, PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
"CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
@ -4023,7 +4025,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
req ? req->intrc : 0);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing TCW: %p\n",
dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.tm.tcw);
@ -4035,43 +4037,42 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
(struct tcw *)(unsigned long)irb->scsw.tm.tcw);
if (tsb) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" tsb->length %d\n", tsb->length);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" tsb->flags %x\n", tsb->flags);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" tsb->dcw_offset %d\n", tsb->dcw_offset);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" tsb->count %d\n", tsb->count);
residual = tsb->count - 28;
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" residual %d\n", residual);
switch (tsb->flags & 0x07) {
case 1: /* tsa_iostat */
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.dev_time %d\n",
tsb->tsa.iostat.dev_time);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.def_time %d\n",
tsb->tsa.iostat.def_time);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.queue_time %d\n",
tsb->tsa.iostat.queue_time);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.dev_busy_time %d\n",
tsb->tsa.iostat.dev_busy_time);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.dev_act_time %d\n",
tsb->tsa.iostat.dev_act_time);
sense = tsb->tsa.iostat.sense;
break;
case 2: /* ts_ddpc */
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
for (sl = 0; sl < 2; sl++) {
len += sprintf(page + len,
KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.ddpc.rcq %2d-%2d: ",
(8 * sl), ((8 * sl) + 7));
rcq = tsb->tsa.ddpc.rcq;
@ -4084,15 +4085,14 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
sense = tsb->tsa.ddpc.sense;
break;
case 3: /* tsa_intrg */
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" tsb->tsa.intrg.: not supportet yet \n");
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.intrg.: not supportet yet\n");
break;
}
if (sense) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len,
KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
@ -4104,27 +4104,27 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
if (sense[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
sprintf(page + len, KERN_ERR PRINTK_HEADER
sprintf(page + len, PRINTK_HEADER
" 24 Byte: %x MSG %x, "
"%s MSGb to SYSOP\n",
sense[7] >> 4, sense[7] & 0x0f,
sense[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
sprintf(page + len, KERN_ERR PRINTK_HEADER
sprintf(page + len, PRINTK_HEADER
" 32 Byte: Format: %x "
"Exception class %x\n",
sense[6] & 0x0f, sense[22] >> 4);
}
} else {
sprintf(page + len, KERN_ERR PRINTK_HEADER
sprintf(page + len, PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
} else {
sprintf(page + len, KERN_ERR PRINTK_HEADER
sprintf(page + len, PRINTK_HEADER
" SORRY - NO TSB DATA AVAILABLE\n");
}
printk("%s", page);
printk(KERN_ERR "%s", page);
free_page((unsigned long) page);
}
@ -4161,9 +4161,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
private = (struct dasd_eckd_private *) device->private;
/* Read Configuration Data */
rc = dasd_eckd_read_conf(device);
if (rc)
goto out_err;
dasd_eckd_read_conf(device);
dasd_eckd_get_uid(device, &temp_uid);
/* Generate device unique id */
@ -4183,9 +4181,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
dasd_eckd_validate_server(device, DASD_CQR_FLAGS_FAILFAST);
/* RE-Read Configuration Data */
rc = dasd_eckd_read_conf(device);
if (rc)
goto out_err;
dasd_eckd_read_conf(device);
/* Read Feature Codes */
dasd_eckd_read_features(device);

View File

@ -479,19 +479,19 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
"No memory to dump sense data");
return;
}
len = sprintf(page, KERN_ERR PRINTK_HEADER
len = sprintf(page, PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X\n", req,
irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
@ -502,7 +502,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len += sprintf(page + len, "\n");
}
} else {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
printk(KERN_ERR "%s", page);
@ -512,10 +512,9 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
act = req->cpaddr;
for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
end = min(act + 8, last);
len = sprintf(page, KERN_ERR PRINTK_HEADER
" Related CP in req: %p\n", req);
len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
while (act <= end) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
@ -533,11 +532,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len = 0;
if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
len += sprintf(page + len, PRINTK_HEADER "......\n");
}
end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
while (act <= end) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
@ -552,10 +551,10 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
/* print last CCWs */
if (act < last - 2) {
act = last - 2;
len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
len += sprintf(page + len, PRINTK_HEADER "......\n");
}
while (act <= last) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;

View File

@ -516,6 +516,8 @@ struct dasd_block {
#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
#define DASD_FLAG_SUSPENDED 9 /* The device was suspended */
#define DASD_FLAG_SAFE_OFFLINE 10 /* safe offline processing requested*/
#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */
void dasd_put_device_wake(struct dasd_device *);

View File

@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <asm/compat.h>
#include <asm/ccwdev.h>
#include <asm/schid.h>
#include <asm/cmb.h>
#include <asm/uaccess.h>
@ -308,11 +309,12 @@ static int dasd_ioctl_information(struct dasd_block *block,
unsigned int cmd, void __user *argp)
{
struct dasd_information2_t *dasd_info;
unsigned long flags;
int rc;
struct subchannel_id sch_id;
struct ccw_dev_id dev_id;
struct dasd_device *base;
struct ccw_device *cdev;
struct ccw_dev_id dev_id;
unsigned long flags;
int rc;
base = block->base;
if (!base->discipline || !base->discipline->fill_info)
@ -330,9 +332,10 @@ static int dasd_ioctl_information(struct dasd_block *block,
cdev = base->cdev;
ccw_device_get_id(cdev, &dev_id);
ccw_device_get_schid(cdev, &sch_id);
dasd_info->devno = dev_id.devno;
dasd_info->schid = _ccw_device_get_subchannel_number(base->cdev);
dasd_info->schid = sch_id.sch_no;
dasd_info->cu_type = cdev->id.cu_type;
dasd_info->cu_model = cdev->id.cu_model;
dasd_info->dev_type = cdev->id.dev_type;

View File

@ -1,5 +1,5 @@
/*
* Copyright IBM Corp. 1999, 2009
* Copyright IBM Corp. 1999,2012
*
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
@ -103,6 +103,7 @@ extern u64 sclp_facilities;
#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
#define SCLP_HAS_PCI_RECONFIG (sclp_facilities & 0x0000000040000000ULL)
struct gds_subvector {

View File

@ -1,5 +1,5 @@
/*
* Copyright IBM Corp. 2007, 2009
* Copyright IBM Corp. 2007,2012
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
@ -19,10 +20,11 @@
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include "sclp.h"
@ -400,17 +402,15 @@ out:
static int sclp_assign_storage(u16 rn)
{
unsigned long long start, address;
unsigned long long start;
int rc;
rc = do_assign_storage(0x000d0001, rn);
if (rc)
goto out;
start = address = rn2addr(rn);
for (; address < start + rzm; address += PAGE_SIZE)
page_set_storage_key(address, PAGE_DEFAULT_KEY, 0);
out:
return rc;
return rc;
start = rn2addr(rn);
storage_key_init_range(start, start + rzm);
return 0;
}
static int sclp_unassign_storage(u16 rn)
@ -701,6 +701,67 @@ __initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* PCI I/O adapter configuration related functions.
*/
#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
#define SCLP_RECONFIG_PCI_ATPYE 2
struct pci_cfg_sccb {
struct sccb_header header;
u8 atype; /* adapter type */
u8 reserved1;
u16 reserved2;
u32 aid; /* adapter identifier */
} __packed;
static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
{
struct pci_cfg_sccb *sccb;
int rc;
if (!SCLP_HAS_PCI_RECONFIG)
return -EOPNOTSUPP;
sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->atype = SCLP_RECONFIG_PCI_ATPYE;
sccb->aid = fid;
rc = do_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
cmd, sccb->header.response_code);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
int sclp_pci_configure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_configure);
int sclp_pci_deconfigure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_deconfigure);
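
Both exported wrappers funnel into do_pci_configure(), which fails fast with -EOPNOTSUPP when the SCLP facility bit is absent. An illustrative in-kernel caller sketch (the hotplug driver's enable_slot() earlier in this series is the real consumer):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <asm/sclp.h>

/* Illustrative only: configure a function by FID, tolerating old firmware. */
static int example_configure(u32 fid)
{
	int rc = sclp_pci_configure(fid);

	if (rc == -EOPNOTSUPP)
		pr_info("SCLP does not offer PCI reconfiguration\n");
	return rc;
}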
/*
* Channel path configuration related functions.
*/

View File

@ -65,10 +65,18 @@ static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
}
}
static int ccwgroup_set_online(struct ccwgroup_device *gdev)
/**
* ccwgroup_set_online() - enable a ccwgroup device
* @gdev: target ccwgroup device
*
* This function attempts to put the ccwgroup device into the online state.
* Returns:
* %0 on success and a negative error value on failure.
*/
int ccwgroup_set_online(struct ccwgroup_device *gdev)
{
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
int ret = 0;
int ret = -EINVAL;
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
@ -84,11 +92,20 @@ out:
atomic_set(&gdev->onoff, 0);
return ret;
}
EXPORT_SYMBOL(ccwgroup_set_online);
static int ccwgroup_set_offline(struct ccwgroup_device *gdev)
/**
* ccwgroup_set_offline() - disable a ccwgroup device
* @gdev: target ccwgroup device
*
* This function attempts to put the ccwgroup device into the offline state.
* Returns:
* %0 on success and a negative error value on failure.
*/
int ccwgroup_set_offline(struct ccwgroup_device *gdev)
{
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
int ret = 0;
int ret = -EINVAL;
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
@ -104,6 +121,7 @@ out:
atomic_set(&gdev->onoff, 0);
return ret;
}
EXPORT_SYMBOL(ccwgroup_set_offline);
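
With the set_online/set_offline helpers now exported and documented, a ccwgroup driver can drive the state transition itself rather than only reacting to the sysfs online attribute. A sketch under that assumption:

#include <linux/errno.h>
#include <asm/ccwgroup.h>

/* Illustrative only: bounce a group device, tolerating a concurrent
 * state change (-EAGAIN comes from the onoff cmpxchg guard above). */
static int example_bounce(struct ccwgroup_device *gdev)
{
	int rc = ccwgroup_set_offline(gdev);

	if (rc && rc != -EAGAIN)
		return rc;
	return ccwgroup_set_online(gdev);
}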
static ssize_t ccwgroup_online_store(struct device *dev,
struct device_attribute *attr,

View File

@ -1,7 +1,7 @@
/*
* S/390 common I/O routines -- channel subsystem call
*
* Copyright IBM Corp. 1999, 2010
* Copyright IBM Corp. 1999,2012
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <asm/cio.h>
#include <asm/chpid.h>
@ -260,26 +261,45 @@ __get_chpid_from_lir(void *data)
return (u16) (lir->indesc[0]&0x000000ff);
}
struct chsc_sei_area {
struct chsc_header request;
struct chsc_sei_nt0_area {
u8 flags;
u8 vf; /* validity flags */
u8 rs; /* reporting source */
u8 cc; /* content code */
u16 fla; /* full link address */
u16 rsid; /* reporting source id */
u32 reserved1;
u32 reserved2;
u32 reserved3;
struct chsc_header response;
u32 reserved4;
u8 flags;
u8 vf; /* validity flags */
u8 rs; /* reporting source */
u8 cc; /* content code */
u16 fla; /* full link address */
u16 rsid; /* reporting source id */
u32 reserved5;
u32 reserved6;
u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */
} __packed;
static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
struct chsc_sei_nt2_area {
u8 flags; /* p and v bit */
u8 reserved1;
u8 reserved2;
u8 cc; /* content code */
u32 reserved3[13];
u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
} __packed;
#define CHSC_SEI_NT0 0ULL
#define CHSC_SEI_NT2 (1ULL << 61)
struct chsc_sei {
struct chsc_header request;
u32 reserved1;
u64 ntsm; /* notification type mask */
struct chsc_header response;
u32 reserved2;
union {
struct chsc_sei_nt0_area nt0_area;
struct chsc_sei_nt2_area nt2_area;
u8 nt_area[PAGE_SIZE - 24];
} u;
} __packed;
static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
struct chp_id chpid;
int id;
@ -298,7 +318,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
}
}
static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
struct chp_link link;
struct chp_id chpid;
@ -330,7 +350,7 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
s390_process_res_acc(&link);
}
static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
struct channel_path *chp;
struct chp_id chpid;
@ -366,7 +386,7 @@ struct chp_config_data {
u8 pc;
};
static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
struct chp_config_data *data;
struct chp_id chpid;
@ -398,7 +418,7 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
}
}
static void chsc_process_sei_scm_change(struct chsc_sei_area *sei_area)
static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
int ret;
@ -412,13 +432,26 @@ static void chsc_process_sei_scm_change(struct chsc_sei_area *sei_area)
" failed (rc=%d).\n", ret);
}
static void chsc_process_sei(struct chsc_sei_area *sei_area)
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
/* Check if we might have lost some information. */
if (sei_area->flags & 0x40) {
CIO_CRW_EVENT(2, "chsc: event overflow\n");
css_schedule_eval_all();
#ifdef CONFIG_PCI
switch (sei_area->cc) {
case 1:
zpci_event_error(sei_area->ccdf);
break;
case 2:
zpci_event_availability(sei_area->ccdf);
break;
default:
CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
sei_area->cc);
break;
}
#endif
}
static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
/* which kind of information was stored? */
switch (sei_area->cc) {
case 1: /* link incident*/
@ -443,9 +476,51 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
}
}
static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
{
do {
memset(sei, 0, sizeof(*sei));
sei->request.length = 0x0010;
sei->request.code = 0x000e;
sei->ntsm = ntsm;
if (chsc(sei))
break;
if (sei->response.code == 0x0001) {
CIO_CRW_EVENT(2, "chsc: sei successful\n");
/* Check if we might have lost some information. */
if (sei->u.nt0_area.flags & 0x40) {
CIO_CRW_EVENT(2, "chsc: event overflow\n");
css_schedule_eval_all();
}
switch (sei->ntsm) {
case CHSC_SEI_NT0:
chsc_process_sei_nt0(&sei->u.nt0_area);
return 1;
case CHSC_SEI_NT2:
chsc_process_sei_nt2(&sei->u.nt2_area);
return 1;
default:
CIO_CRW_EVENT(2, "chsc: unhandled nt (nt=%08Lx)\n",
sei->ntsm);
return 0;
}
} else {
CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
sei->response.code);
break;
}
} while (sei->u.nt0_area.flags & 0x80);
return 0;
}
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
struct chsc_sei_area *sei_area;
struct chsc_sei *sei;
if (overflow) {
css_schedule_eval_all();
@ -459,25 +534,18 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
return;
/* Access to sei_page is serialized through machine check handler
* thread, so no need for locking. */
sei_area = sei_page;
sei = sei_page;
CIO_TRACE_EVENT(2, "prcss");
do {
memset(sei_area, 0, sizeof(*sei_area));
sei_area->request.length = 0x0010;
sei_area->request.code = 0x000e;
if (chsc(sei_area))
break;
if (sei_area->response.code == 0x0001) {
CIO_CRW_EVENT(4, "chsc: sei successful\n");
chsc_process_sei(sei_area);
} else {
CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
sei_area->response.code);
break;
}
} while (sei_area->flags & 0x80);
/*
* The ntsm does not allow us to select NT0 and NT2 together. We need
* to check for NT2 first, then additionally for NT0...
*/
#ifdef CONFIG_PCI
if (!__chsc_process_crw(sei, CHSC_SEI_NT2))
#endif
__chsc_process_crw(sei, CHSC_SEI_NT0);
}
void chsc_chp_online(struct chp_id chpid)

View File

@ -2036,16 +2036,6 @@ void ccw_driver_unregister(struct ccw_driver *cdriver)
driver_unregister(&cdriver->driver);
}
/* Helper func for qdio. */
struct subchannel_id
ccw_device_get_subchannel_id(struct ccw_device *cdev)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
return sch->schid;
}
static void ccw_device_todo(struct work_struct *work)
{
struct ccw_device_private *priv;
@ -2138,4 +2128,3 @@ EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);

View File

@ -142,9 +142,7 @@ int ccw_device_notify(struct ccw_device *, int);
void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);
/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);

View File

@ -755,14 +755,18 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
}
EXPORT_SYMBOL(ccw_device_tm_intrg);
// FIXME: these have to go:
int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
/**
* ccw_device_get_schid - obtain a subchannel id
* @cdev: device to obtain the id for
* @schid: where to fill in the values
*/
void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
{
return cdev->private->schid.sch_no;
}
struct subchannel *sch = to_subchannel(cdev->dev.parent);
*schid = sch->schid;
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
@ -777,5 +781,4 @@ EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
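
A sketch of the replacement pattern for the removed helper (this mirrors the dasd_ioctl.c conversion earlier in the series: fetch the full subchannel_id, then pick out sch_no):

#include <linux/types.h>
#include <asm/ccwdev.h>
#include <asm/schid.h>

static u16 example_sch_no(struct ccw_device *cdev)
{
	struct subchannel_id schid;

	ccw_device_get_schid(cdev, &schid);
	return schid.sch_no;
}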

View File

@ -234,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
* Determine pathgroup state from PGID data.
*/
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
int *mismatch, int *reserved, u8 *reset)
int *mismatch, u8 *reserved, u8 *reset)
{
struct pgid *pgid = &cdev->private->pgid[0];
struct pgid *first = NULL;
@ -248,7 +248,7 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
if ((cdev->private->pgid_valid_mask & lpm) == 0)
continue;
if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
*reserved = 1;
*reserved |= lpm;
if (pgid_is_reset(pgid)) {
*reset |= lpm;
continue;
@ -316,14 +316,14 @@ static void snid_done(struct ccw_device *cdev, int rc)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct pgid *pgid;
int mismatch = 0;
int reserved = 0;
u8 reserved = 0;
u8 reset = 0;
u8 donepm;
if (rc)
goto out;
pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
if (reserved)
if (reserved == cdev->private->pgid_valid_mask)
rc = -EUSERS;
else if (mismatch)
rc = -EOPNOTSUPP;
@ -336,7 +336,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
}
out:
CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
"todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid,
"todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
cdev->private->pgid_todo_mask, mismatch, reserved, reset);
switch (rc) {
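
Worked example of the semantics change in this hunk (values illustrative): with pgid_valid_mask == 0xc0, two paths are operational; if only one of them (0x80) is reserved by another host, the old boolean test returned -EUSERS and gave up on the device although path 0x40 was still usable. The new test only fails when every valid path is reserved:

#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative reduction of the new check in snid_done(). */
static int example_rc(u8 pgid_valid_mask, u8 reserved)
{
	return (reserved == pgid_valid_mask) ? -EUSERS : 0;
}

/* example_rc(0xc0, 0x80) == 0 now; the old test gave -EUSERS here. */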

View File

@ -129,7 +129,6 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
unsigned int ccq = 0;
BUG_ON(!q->irq_ptr->sch_token);
qperf_inc(q, eqbs);
if (!q->is_input_q)
@ -147,7 +146,6 @@ again:
}
if (rc == 2) {
BUG_ON(tmp_count == count);
qperf_inc(q, eqbs_partial);
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
tmp_count);
@ -189,8 +187,6 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
if (!count)
return 0;
BUG_ON(!q->irq_ptr->sch_token);
qperf_inc(q, sqbs);
if (!q->is_input_q)
@ -199,7 +195,7 @@ again:
ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
rc = qdio_check_ccq(q, ccq);
if (!rc) {
WARN_ON(tmp_count);
WARN_ON_ONCE(tmp_count);
return count - tmp_count;
}
@ -224,9 +220,6 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char __state = 0;
int i;
BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
@ -258,9 +251,6 @@ static inline int set_buf_states(struct qdio_q *q, int bufnr,
{
int i;
BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
if (is_qebsm(q))
return qdio_do_sqbs(q, state, bufnr, count);
@ -345,7 +335,6 @@ again:
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
retries++;
if (!start_time) {
@ -559,7 +548,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
break;
default:
BUG();
WARN_ON_ONCE(1);
}
out:
return q->first_to_check;
@ -678,12 +667,10 @@ static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
if (aob == NULL)
continue;
BUG_ON(q->u.out.sbal_state == NULL);
q->u.out.sbal_state[b].flags |=
QDIO_OUTBUF_STATE_FLAG_PENDING;
q->u.out.aobs[b] = NULL;
} else if (state == SLSB_P_OUTPUT_EMPTY) {
BUG_ON(q->u.out.sbal_state == NULL);
q->u.out.sbal_state[b].aob = NULL;
}
b = next_buf(b);
@ -703,12 +690,11 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
BUG_ON(q->sbal_state == NULL);
q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
BUG_ON(phys_aob & 0xFF);
WARN_ON_ONCE(phys_aob & 0xFF);
}
out:
@ -809,8 +795,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
goto out;
switch (state) {
case SLSB_P_OUTPUT_PENDING:
BUG();
case SLSB_P_OUTPUT_EMPTY:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
@ -840,7 +824,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_OUTPUT_HALTED:
break;
default:
BUG();
WARN_ON_ONCE(1);
}
out:
@ -912,7 +896,7 @@ retry:
static void __qdio_outbound_processing(struct qdio_q *q)
{
qperf_inc(q, tasklet_outbound);
BUG_ON(atomic_read(&q->nr_buf_used) < 0);
WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
if (qdio_outbound_q_moved(q))
qdio_kick_handler(q);
@ -1138,16 +1122,10 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
irq_ptr->perf_stat.qdio_int++;
if (IS_ERR(irb)) {
switch (PTR_ERR(irb)) {
case -EIO:
DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
wake_up(&cdev->private->wait_q);
return;
default:
WARN_ON(1);
return;
}
DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
wake_up(&cdev->private->wait_q);
return;
}
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
@ -1173,7 +1151,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
case QDIO_IRQ_STATE_STOPPED:
break;
default:
WARN_ON(1);
WARN_ON_ONCE(1);
}
wake_up(&cdev->private->wait_q);
}
@ -1227,7 +1205,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
if (!irq_ptr)
return -ENODEV;
BUG_ON(irqs_disabled());
WARN_ON_ONCE(irqs_disabled());
DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
mutex_lock(&irq_ptr->setup_mutex);
@ -1358,7 +1336,6 @@ int qdio_allocate(struct qdio_initialize *init_data)
irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr->qdr)
goto out_rel;
WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
init_data->no_output_qs))
@ -1597,9 +1574,7 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
set:
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
used = atomic_add_return(count, &q->nr_buf_used) - count;
BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
if (need_siga_in(q))
return qdio_siga_input(q);
@ -1624,7 +1599,6 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
used = atomic_add_return(count, &q->nr_buf_used);
BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
if (used == QDIO_MAX_BUFFERS_PER_Q)
qperf_inc(q, outbound_queue_full);
@ -1678,7 +1652,6 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
{
struct qdio_irq *irq_ptr;
if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
@ -1721,8 +1694,6 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
WARN_ON(queue_irqs_enabled(q));
clear_nonshared_ind(irq_ptr);
qdio_stop_polling(q);
clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
@ -1769,7 +1740,6 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
if (!irq_ptr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
WARN_ON(queue_irqs_enabled(q));
/*
* Cannot rely on automatic sync after interrupt since queues may

View File

@ -140,10 +140,8 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
/* fill in sbal */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sbal[j] = *sbals_array++;
BUG_ON((unsigned long)q->sbal[j] & 0xff);
}
/* fill in slib */
if (i > 0) {
@ -434,9 +432,8 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
irq_ptr->int_parm = init_data->int_parm;
irq_ptr->nr_input_qs = init_data->no_input_qs;
irq_ptr->nr_output_qs = init_data->no_output_qs;
irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
irq_ptr->cdev = init_data->cdev;
ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
setup_queues(irq_ptr, init_data);
setup_qib(irq_ptr, init_data);
@ -483,7 +480,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
char s[80];
snprintf(s, 80, "qdio: %s %s on SC %x using "
"AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
"AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
dev_name(&cdev->dev),
(irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),

View File

@ -73,7 +73,6 @@ static void put_indicator(u32 *addr)
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
mutex_lock(&tiq_list_lock);
BUG_ON(irq_ptr->nr_input_qs < 1);
list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
xchg(irq_ptr->dsci, 1 << 7);
@ -83,7 +82,6 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
BUG_ON(irq_ptr->nr_input_qs < 1);
q = irq_ptr->input_qs[0];
/* if establish triggered an error */
if (!q || !q->entry.prev || !q->entry.next)

View File

@ -241,84 +241,70 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt)
{
int mod_len, short_len, long_len, long_offset, limit;
int mod_len, short_len;
unsigned char *p, *q, *dp, *dq, *u, *inp;
mod_len = crt->inputdatalength;
short_len = mod_len / 2;
long_len = mod_len / 2 + 8;
/*
* CEX2A cannot handle p, dp, or U > 128 bytes.
* If we have one of these, we need to do extra checking.
* For CEX3A the limit is 256 bytes.
* CEX2A and CEX3A w/o FW update can handle requests with up to
* a 256-byte modulus (2k keys).
* CEX3A with FW update and CEX4A cards are able to handle
* a 512-byte modulus (4k keys).
*/
if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
limit = 256;
else
limit = 128;
if (long_len > limit) {
/*
* zcrypt_rsa_crt already checked for the leading
* zeroes of np_prime, bp_key and u_mult_inc.
*/
long_offset = long_len - limit;
long_len = limit;
} else
long_offset = 0;
/*
* Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
* the larger message structure.
*/
if (long_len <= 64) {
if (mod_len <= 128) { /* up to 1024 bit key size */
struct type50_crb1_msg *crb1 = ap_msg->message;
memset(crb1, 0, sizeof(*crb1));
ap_msg->length = sizeof(*crb1);
crb1->header.msg_type_code = TYPE50_TYPE_CODE;
crb1->header.msg_len = sizeof(*crb1);
crb1->keyblock_type = TYPE50_CRB1_FMT;
p = crb1->p + sizeof(crb1->p) - long_len;
p = crb1->p + sizeof(crb1->p) - short_len;
q = crb1->q + sizeof(crb1->q) - short_len;
dp = crb1->dp + sizeof(crb1->dp) - long_len;
dp = crb1->dp + sizeof(crb1->dp) - short_len;
dq = crb1->dq + sizeof(crb1->dq) - short_len;
u = crb1->u + sizeof(crb1->u) - long_len;
u = crb1->u + sizeof(crb1->u) - short_len;
inp = crb1->message + sizeof(crb1->message) - mod_len;
} else if (long_len <= 128) {
} else if (mod_len <= 256) { /* up to 2048 bit key size */
struct type50_crb2_msg *crb2 = ap_msg->message;
memset(crb2, 0, sizeof(*crb2));
ap_msg->length = sizeof(*crb2);
crb2->header.msg_type_code = TYPE50_TYPE_CODE;
crb2->header.msg_len = sizeof(*crb2);
crb2->keyblock_type = TYPE50_CRB2_FMT;
p = crb2->p + sizeof(crb2->p) - long_len;
p = crb2->p + sizeof(crb2->p) - short_len;
q = crb2->q + sizeof(crb2->q) - short_len;
dp = crb2->dp + sizeof(crb2->dp) - long_len;
dp = crb2->dp + sizeof(crb2->dp) - short_len;
dq = crb2->dq + sizeof(crb2->dq) - short_len;
u = crb2->u + sizeof(crb2->u) - long_len;
u = crb2->u + sizeof(crb2->u) - short_len;
inp = crb2->message + sizeof(crb2->message) - mod_len;
} else {
/* long_len >= 256 */
} else if ((mod_len <= 512) && /* up to 4096 bit key size */
(zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */
struct type50_crb3_msg *crb3 = ap_msg->message;
memset(crb3, 0, sizeof(*crb3));
ap_msg->length = sizeof(*crb3);
crb3->header.msg_type_code = TYPE50_TYPE_CODE;
crb3->header.msg_len = sizeof(*crb3);
crb3->keyblock_type = TYPE50_CRB3_FMT;
p = crb3->p + sizeof(crb3->p) - long_len;
p = crb3->p + sizeof(crb3->p) - short_len;
q = crb3->q + sizeof(crb3->q) - short_len;
dp = crb3->dp + sizeof(crb3->dp) - long_len;
dp = crb3->dp + sizeof(crb3->dp) - short_len;
dq = crb3->dq + sizeof(crb3->dq) - short_len;
u = crb3->u + sizeof(crb3->u) - long_len;
u = crb3->u + sizeof(crb3->u) - short_len;
inp = crb3->message + sizeof(crb3->message) - mod_len;
}
} else
return -EINVAL;
if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
/*
* correct the offset of p, bp and mult_inv according to zcrypt.h:
* the block size is right-aligned (skip the first byte)
*/
if (copy_from_user(p, crt->np_prime + MSGTYPE_ADJUSTMENT, short_len) ||
copy_from_user(q, crt->nq_prime, short_len) ||
copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
copy_from_user(dp, crt->bp_key + MSGTYPE_ADJUSTMENT, short_len) ||
copy_from_user(dq, crt->bq_key, short_len) ||
copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
copy_from_user(u, crt->u_mult_inv + MSGTYPE_ADJUSTMENT, short_len) ||
copy_from_user(inp, crt->inputdata, mod_len))
return -EFAULT;
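
The tiering above reduces to plain arithmetic on the modulus length, as a sketch (CEX3A_MAX_MOD_SIZE is 512 bytes in this driver):

/* Illustrative reduction of the keyblock selection above. */
static const char *crb_for(int inputdatalength, int max_mod_size)
{
	if (inputdatalength <= 128)	/* up to 1024-bit keys */
		return "type50_crb1_msg";
	if (inputdatalength <= 256)	/* up to 2048-bit keys */
		return "type50_crb2_msg";
	if (inputdatalength <= 512 && max_mod_size == 512)
		return "type50_crb3_msg"; /* 4k keys, CEX3A+ only */
	return NULL;			/* rejected with -EINVAL */
}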

View File

@ -33,6 +33,8 @@
#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /*sizeof(struct type50_crb2_msg)*/
#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /*sizeof(struct type50_crb3_msg)*/
#define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/
int zcrypt_msgtype50_init(void);
void zcrypt_msgtype50_exit(void);

View File

@ -83,19 +83,25 @@ static inline void __raw_writel(u32 b, volatile void __iomem *addr)
#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
#ifdef CONFIG_64BIT
#ifndef __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
return *(const volatile u64 __force *) addr;
}
#endif
#define readq(addr) __le64_to_cpu(__raw_readq(addr))
#ifndef __raw_writeq
static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
*(volatile u64 __force *) addr = b;
}
#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
#endif
#define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr)
#endif /* CONFIG_64BIT */
#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *) 0)
#endif
@ -286,15 +292,20 @@ static inline void writesb(const void __iomem *addr, const void *buf, int len)
#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
#ifndef pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */
/*
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
*/
#ifndef virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa((unsigned long)address);
@ -304,6 +315,7 @@ static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
#endif
/*
* Change "struct page" to physical address.
@ -363,9 +375,16 @@ static inline void *bus_to_virt(unsigned long address)
}
#endif
#ifndef memset_io
#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
#endif
#ifndef memcpy_fromio
#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
#endif
#ifndef memcpy_toio
#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
#endif
#endif /* __KERNEL__ */

View File

@ -10,9 +10,6 @@
*/
#include <linux/smp.h>
#ifndef CONFIG_S390
#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
@ -746,8 +743,11 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
#else /* !CONFIG_GENERIC_HARDIRQS */
extern struct msi_desc *irq_get_msi_desc(unsigned int irq);
extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
#endif /* CONFIG_GENERIC_HARDIRQS */
#endif /* !CONFIG_S390 */
#endif /* _LINUX_IRQ_H */