Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-30 08:04:13 +08:00)
commit b67bfe0d42

hlist: drop the node parameter from iterators

I'm not sure why, but the hlist for-each-entry iterators were not
conceived to match the list one:

	list_for_each_entry(pos, head, member)

The hlist ones were greedy and wanted an extra parameter:

	hlist_for_each_entry(tpos, pos, head, member)

Why did they need an extra pos parameter? I'm not quite sure. Not only
do they not really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.

Besides the semantic patch, some manual work was required:

 - Fix up the actual hlist iterators in linux/list.h.
 - Fix up the declarations of other iterators based on the hlist ones.
 - A very small number of places were using the 'node' parameter; these
   were modified to use 'obj->member' instead.
 - Coccinelle didn't handle the hlist_for_each_entry_safe iterator
   properly, so those had to be fixed up manually.

The semantic patch, which is mostly the work of Peter Senna Tschudin,
is here:

 @@
 iterator name hlist_for_each_entry, hlist_for_each_entry_continue,
 hlist_for_each_entry_from, hlist_for_each_entry_rcu,
 hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh,
 for_each_busy_worker, ax25_uid_for_each, ax25_for_each,
 inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each,
 sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound,
 hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu,
 nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each,
 nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp,
 for_each_host;
 type T;
 expression a,c,d,e;
 identifier b;
 statement S;
 @@

 -T b;
 <+... when != b
 (
 hlist_for_each_entry(a,
 - b,
 c, d) S
 |
 hlist_for_each_entry_continue(a,
 - b,
 c) S
 |
 hlist_for_each_entry_from(a,
 - b,
 c) S
 |
 hlist_for_each_entry_rcu(a,
 - b,
 c, d) S
 |
 hlist_for_each_entry_rcu_bh(a,
 - b,
 c, d) S
 |
 hlist_for_each_entry_continue_rcu_bh(a,
 - b,
 c) S
 |
 for_each_busy_worker(a, c,
 - b,
 d) S
 |
 ax25_uid_for_each(a,
 - b,
 c) S
 |
 ax25_for_each(a,
 - b,
 c) S
 |
 inet_bind_bucket_for_each(a,
 - b,
 c) S
 |
 sctp_for_each_hentry(a,
 - b,
 c) S
 |
 sk_for_each(a,
 - b,
 c) S
 |
 sk_for_each_rcu(a,
 - b,
 c) S
 |
 sk_for_each_from
 -(a, b)
 +(a)
 S
 + sk_for_each_from(a) S
 |
 sk_for_each_safe(a,
 - b,
 c, d) S
 |
 sk_for_each_bound(a,
 - b,
 c) S
 |
 hlist_for_each_entry_safe(a,
 - b,
 c, d, e) S
 |
 hlist_for_each_entry_continue_rcu(a,
 - b,
 c) S
 |
 nr_neigh_for_each(a,
 - b,
 c) S
 |
 nr_neigh_for_each_safe(a,
 - b,
 c, d) S
 |
 nr_node_for_each(a,
 - b,
 c) S
 |
 nr_node_for_each_safe(a,
 - b,
 c, d) S
 |
 - for_each_gfn_sp(a, c, d, b) S
 + for_each_gfn_sp(a, c, d) S
 |
 - for_each_gfn_indirect_valid_sp(a, c, d, b) S
 + for_each_gfn_indirect_valid_sp(a, c, d) S
 |
 for_each_host(a,
 - b,
 c) S
 |
 for_each_host_safe(a,
 - b,
 c, d) S
 |
 for_each_mesh_entry(a,
 - b,
 c, d) S
 )
 ...+>

[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foundation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
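To make the effect of the conversion concrete, here is a minimal
before/after sketch of a bucket lookup (struct foo, foo_hash, bucket
and key are illustrative names, not taken from the patch):

	struct foo {
		int key;
		struct hlist_node link;
	};

	struct foo *obj;
	struct hlist_node *node;	/* cursor the old API demanded */

	/* before: extra hlist_node parameter */
	hlist_for_each_entry(obj, node, &foo_hash[bucket], link)
		if (obj->key == key)
			return obj;

	/* after: same shape as list_for_each_entry() */
	hlist_for_each_entry(obj, &foo_hash[bucket], link)
		if (obj->key == key)
			return obj;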
141 lines · 3.1 KiB · C
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <asm/hw_irq.h>

/* mapping of irq numbers to msi_desc */
static struct hlist_head *msi_hash;
static unsigned int msihash_shift = 6;
#define msi_hashfn(nr) hash_long(nr, msihash_shift)

static DEFINE_SPINLOCK(msi_map_lock);

/* Caller must hold rcu_read_lock() or otherwise be RCU read-side protected. */
struct msi_desc *__irq_get_msi_desc(unsigned int irq)
{
	struct msi_map *map;

	hlist_for_each_entry_rcu(map,
			&msi_hash[msi_hashfn(irq)], msi_chain)
		if (map->irq == irq)
			return map->msi;
	return NULL;
}

int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
{
	if (msi->msi_attrib.is_msix) {
		/* MSI-X: mask state lives in the vector control word
		 * of the MSI-X table entry */
		int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL;
		msi->masked = readl(msi->mask_base + offset);
		writel(flag, msi->mask_base + offset);
	} else {
		if (msi->msi_attrib.maskbit) {
			/* MSI with per-vector masking: mask bits live
			 * in config space */
			int pos;
			u32 mask_bits;

			pos = (long) msi->mask_base;
			pci_read_config_dword(msi->dev, pos, &mask_bits);
			mask_bits &= ~(mask);
			mask_bits |= flag & mask;
			pci_write_config_dword(msi->dev, pos, mask_bits);
		} else {
			return 0;
		}
	}

	msi->msi_attrib.maskbit = !!flag;
	return 1;
}

int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
		       unsigned int nr, int offset)
{
	struct msi_map *map;
	struct msi_msg msg;
	int rc;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->irq = nr;
	map->msi = msi;
	zdev->msi_map[nr & ZPCI_MSI_MASK] = map;

	pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
		 __func__, nr, msi_hashfn(nr));
	/* publish the irq -> msi_desc mapping for RCU readers */
	hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]);

	spin_lock(&msi_map_lock);
	rc = irq_set_msi_desc(nr, msi);
	if (rc) {
		spin_unlock(&msi_map_lock);
		hlist_del_rcu(&map->msi_chain);
		kfree(map);
		zdev->msi_map[nr & ZPCI_MSI_MASK] = NULL;
		return rc;
	}
	spin_unlock(&msi_map_lock);

	msg.data = nr - offset;
	msg.address_lo = zdev->msi_addr & 0xffffffff;
	msg.address_hi = zdev->msi_addr >> 32;
	write_msi_msg(nr, &msg);
	return 0;
}

void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi)
{
	int irq = msi->irq & ZPCI_MSI_MASK;
	struct msi_map *map;

	msi->msg.address_lo = 0;
	msi->msg.address_hi = 0;
	msi->msg.data = 0;
	msi->irq = 0;
	zpci_msi_set_mask_bits(msi, 1, 1);

	spin_lock(&msi_map_lock);
	map = zdev->msi_map[irq];
	hlist_del_rcu(&map->msi_chain);
	kfree(map);
	zdev->msi_map[irq] = NULL;
	spin_unlock(&msi_map_lock);
}

/*
 * The msi hash table has 1U << msihash_shift (i.e. 64) buckets, which
 * is good for 4..20 devices (a typical device allocates 10 + CPUs
 * MSI's). Maybe make the hash table size adjustable later.
 */
int __init zpci_msihash_init(void)
{
	unsigned int i;

	msi_hash = kmalloc((1U << msihash_shift) * sizeof(*msi_hash),
			   GFP_KERNEL);
	if (!msi_hash)
		return -ENOMEM;

	for (i = 0; i < (1U << msihash_shift); i++)
		INIT_HLIST_HEAD(&msi_hash[i]);
	return 0;
}
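
/*
 * Worked example for the sizing note above (illustrative numbers, not
 * from this file): 20 devices each allocating 10 + 16 CPUs = 26 MSIs
 * gives 520 hash entries across 64 buckets, i.e. roughly 8 entries
 * per bucket on average.
 */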

void __init zpci_msihash_exit(void)
{
	kfree(msi_hash);
}
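
For orientation, a minimal sketch of how these entry points fit
together, assuming a hypothetical caller (zdev, msi, nr and offset
stand in for values the s390 PCI core would supply):

	/* boot: allocate the irq -> msi_desc hash table */
	if (zpci_msihash_init())
		goto fail;

	/* per vector: publish the mapping and program the MSI message */
	if (zpci_setup_msi_irq(zdev, msi, nr, offset))
		goto fail;

	/* interrupt delivery: resolve an irq number to its msi_desc,
	 * under RCU read-side protection */
	msi = __irq_get_msi_desc(nr);

	/* cleanup (e.g. on init failure): mask the vector, drop the
	 * mapping, free the table */
	zpci_teardown_msi_irq(zdev, msi);
	zpci_msihash_exit();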