
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless

Conflicts:
	net/nfc/llcp/llcp.c
John W. Linville 2013-03-18 09:39:21 -04:00
commit 49c87cd1ea
101 changed files with 2633 additions and 600 deletions

View File

@ -30,6 +30,7 @@ The target is named "raid" and it accepts the following parameters:
raid10 Various RAID10 inspired algorithms chosen by additional params
- RAID10: Striped Mirrors (aka 'Striping on top of mirrors')
- RAID1E: Integrated Adjacent Stripe Mirroring
- RAID1E: Integrated Offset Stripe Mirroring
- and other similar RAID10 variants
Reference: Chapter 4 of
@ -64,15 +65,15 @@ The target is named "raid" and it accepts the following parameters:
synchronisation state for each region.
[raid10_copies <# copies>]
[raid10_format near]
[raid10_format <near|far|offset>]
These two options are used to alter the default layout of
a RAID10 configuration. The number of copies can be
specified, but the default is 2. There are other variations
to how the copies are laid down - the default and only current
option is "near". Near copies are what most people think of
with respect to mirroring. If these options are left
unspecified, or 'raid10_copies 2' and/or 'raid10_format near'
are given, then the layouts for 2, 3 and 4 devices are:
specified, but the default is 2. There are also three
variations to how the copies are laid down - the default
is "near". Near copies are what most people think of with
respect to mirroring. If these options are left unspecified,
or 'raid10_copies 2' and/or 'raid10_format near' are given,
then the layouts for 2, 3 and 4 devices are:
2 drives 3 drives 4 drives
-------- ---------- --------------
A1 A1 A1 A1 A2 A1 A1 A2 A2
@ -85,6 +86,33 @@ The target is named "raid" and it accepts the following parameters:
3-device layout is what might be called a 'RAID1E - Integrated
Adjacent Stripe Mirroring'.
If 'raid10_copies 2' and 'raid10_format far', then the layouts
for 2, 3 and 4 devices are:
2 drives 3 drives 4 drives
-------- -------------- --------------------
A1 A2 A1 A2 A3 A1 A2 A3 A4
A3 A4 A4 A5 A6 A5 A6 A7 A8
A5 A6 A7 A8 A9 A9 A10 A11 A12
.. .. .. .. .. .. .. .. ..
A2 A1 A3 A1 A2 A2 A1 A4 A3
A4 A3 A6 A4 A5 A6 A5 A8 A7
A6 A5 A9 A7 A8 A10 A9 A12 A11
.. .. .. .. .. .. .. .. ..
If 'raid10_copies 2' and 'raid10_format offset', then the
layouts for 2, 3 and 4 devices are:
2 drives 3 drives 4 drives
-------- ------------ -----------------
A1 A2 A1 A2 A3 A1 A2 A3 A4
A2 A1 A3 A1 A2 A2 A1 A4 A3
A3 A4 A4 A5 A6 A5 A6 A7 A8
A4 A3 A6 A4 A5 A6 A5 A8 A7
A5 A6 A7 A8 A9 A9 A10 A11 A12
A6 A5 A9 A7 A8 A10 A9 A12 A11
.. .. .. .. .. .. .. .. ..
Here we see layouts closely akin to 'RAID1E - Integrated
Offset Stripe Mirroring'.
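	For example, a hypothetical dmsetup table line for a 4-device
	RAID10 array with 2 copies in the "far" format might look as
	follows (a sketch only: the sector count, the 64-sector chunk
	size, and the 8:x device numbers are placeholder values; "-"
	means no metadata device, and <#raid_devs> is described below):

	0 1960893648 raid raid10 5 64 raid10_copies 2 raid10_format far \
	     4 - 8:17 - 8:33 - 8:49 - 8:65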
<#raid_devs>: The number of devices composing the array.
Each device consists of two entries. The first is the device
containing the metadata (if any); the second is the one containing the
@ -142,3 +170,5 @@ Version History
1.3.0 Added support for RAID 10
1.3.1 Allow device replacement/rebuild for RAID 10
1.3.2 Fix/improve redundancy checking for RAID10
1.4.0 Non-functional change. Removes arg from mapping function.
1.4.1 Add RAID10 "far" and "offset" algorithm support.

View File

@ -105,6 +105,83 @@ Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com>
Proto [2 bytes]
Raw protocol (IP, IPv6, etc) frame.
3.3 Multiqueue tuntap interface:
From version 3.8, Linux supports multiqueue tuntap, which can use multiple
file descriptors (queues) to parallelize packet sending and receiving. The
device allocation is the same as before; if the user wants to create multiple
queues, TUNSETIFF must be called many times, with the same device name and
the IFF_MULTI_QUEUE flag.
char *dev is the name of the device, queues is the number of queues to be
created, and fds is used to store and return the created file descriptors
(queues) to the caller. Each file descriptor serves as the interface to one
queue, which can be accessed from userspace.
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>
int tun_alloc_mq(char *dev, int queues, int *fds)
{
	struct ifreq ifr;
	int fd, err, i;

	if (!dev)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	/* Flags: IFF_TUN   - TUN device (no Ethernet headers)
	 *        IFF_TAP   - TAP device
	 *
	 *        IFF_NO_PI - Do not provide packet information
	 *        IFF_MULTI_QUEUE - Create a queue of multiqueue device
	 */
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
	strcpy(ifr.ifr_name, dev);

	for (i = 0; i < queues; i++) {
		if ((fd = open("/dev/net/tun", O_RDWR)) < 0) {
			err = fd;	/* ensure err is set if open() fails */
			goto err;
		}
		err = ioctl(fd, TUNSETIFF, (void *)&ifr);
		if (err) {
			close(fd);
			goto err;
		}
		fds[i] = fd;
	}
	return 0;
err:
	for (--i; i >= 0; i--)
		close(fds[i]);
	return err;
}
A new ioctl, TUNSETQUEUE, was introduced to enable or disable a queue.
Calling it with the IFF_DETACH_QUEUE flag disables the queue; calling it with
the IFF_ATTACH_QUEUE flag enables it. A queue is enabled by default after it
is created through TUNSETIFF.
fd is the file descriptor (queue) that we want to enable or disable; when
enable is true we enable it, otherwise we disable it.
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>
int tun_set_queue(int fd, int enable)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	if (enable)
		ifr.ifr_flags = IFF_ATTACH_QUEUE;
	else
		ifr.ifr_flags = IFF_DETACH_QUEUE;

	return ioctl(fd, TUNSETQUEUE, (void *)&ifr);
}
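As a minimal illustration (not part of the original document), the two
helpers above could be combined like this; the device name "tap0" and the
queue count are placeholder values, and error handling is kept minimal:

#define NR_QUEUES 4

int main(void)
{
	char dev[IFNAMSIZ] = "tap0";	/* hypothetical device name */
	int fds[NR_QUEUES];

	if (tun_alloc_mq(dev, NR_QUEUES, fds) < 0)
		return 1;

	/* Temporarily detach queue 0; traffic flows over the other queues. */
	tun_set_queue(fds[0], 0);

	/* Re-attach it later. */
	tun_set_queue(fds[0], 1);
	return 0;
}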
Universal TUN/TAP device driver Frequently Asked Questions.
1. What platforms are supported by TUN/TAP driver ?

View File

@ -114,12 +114,6 @@ Maintainers List (try to look for most precise areas first)
-----------------------------------
3C505 NETWORK DRIVER
M: Philip Blundell <philb@gnu.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/i825xx/3c505*
3C59X NETWORK DRIVER
M: Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
L: netdev@vger.kernel.org
@ -2361,12 +2355,6 @@ W: http://www.arm.linux.org.uk/
S: Maintained
F: drivers/video/cyber2000fb.*
CYCLADES 2X SYNC CARD DRIVER
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
W: http://oops.ghostprotocols.net:81/blog
S: Maintained
F: drivers/net/wan/cycx*
CYCLADES ASYNC MUX DRIVER
W: http://www.cyclades.com/
S: Orphan
@ -3067,12 +3055,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
F: drivers/video/s1d13xxxfb.c
F: include/video/s1d13xxxfb.h
ETHEREXPRESS-16 NETWORK DRIVER
M: Philip Blundell <philb@gnu.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/i825xx/eexpress.*
ETHERNET BRIDGE
M: Stephen Hemminger <stephen@networkplumber.org>
L: bridge@lists.linux-foundation.org

View File

@ -113,7 +113,7 @@
STEPUP4((t)+16, fn)
_GLOBAL(powerpc_sha_transform)
PPC_STLU r1,-STACKFRAMESIZE(r1)
PPC_STLU r1,-INT_FRAME_SIZE(r1)
SAVE_8GPRS(14, r1)
SAVE_10GPRS(22, r1)
@ -175,5 +175,5 @@ _GLOBAL(powerpc_sha_transform)
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
addi r1,r1,STACKFRAMESIZE
addi r1,r1,INT_FRAME_SIZE
blr

View File

@ -52,8 +52,6 @@
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
/* Macro for generating the ***_bits() functions */
#define DEFINE_BITOP(fn, op, prefix, postfix) \
static __inline__ void fn(unsigned long mask, \

View File

@ -266,7 +266,8 @@
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
#define FSCR_TAR (1<<8) /* Enable Target Adress Register */
#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
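The (63 - N) expressions follow the Power ISA's MSB-0 bit numbering, in which
bit 0 is the most significant bit of the 64-bit register. An illustrative
helper macro (an assumption for clarity, not part of this patch) captures the
convention:

#define PPC_REG_BIT(msb0_bit)	(1ul << (63 - (msb0_bit)))	/* MSB-0 numbering */

/* e.g. FSCR_TAR  == PPC_REG_BIT(55) == 1 << 8
 *      FSCR_DSCR == PPC_REG_BIT(61) == 1 << 2 */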

View File

@ -358,3 +358,4 @@ SYSCALL_SPU(setns)
COMPAT_SYS(process_vm_readv)
COMPAT_SYS(process_vm_writev)
SYSCALL(finit_module)
SYSCALL(ni_syscall) /* sys_kcmp */

View File

@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
#define __NR_syscalls 354
#define __NR_syscalls 355
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls

View File

@ -376,6 +376,7 @@
#define __NR_process_vm_readv 351
#define __NR_process_vm_writev 352
#define __NR_finit_module 353
#define __NR_kcmp 354
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */

View File

@ -48,6 +48,7 @@ _GLOBAL(__restore_cpu_power7)
_GLOBAL(__setup_cpu_power8)
mflr r11
bl __init_FSCR
bl __init_hvmode_206
mtlr r11
beqlr
@ -56,13 +57,13 @@ _GLOBAL(__setup_cpu_power8)
mfspr r3,SPRN_LPCR
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
bl __init_FSCR
bl __init_TLB
mtlr r11
blr
_GLOBAL(__restore_cpu_power8)
mflr r11
bl __init_FSCR
mfmsr r3
rldicl. r0,r3,4,63
beqlr
@ -115,7 +116,7 @@ __init_LPCR:
__init_FSCR:
mfspr r3,SPRN_FSCR
ori r3,r3,FSCR_TAR
ori r3,r3,FSCR_TAR|FSCR_DSCR
mtspr SPRN_FSCR,r3
blr

View File

@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
mflr r10 ; \
ld r12,PACAKBASE(r13) ; \
LOAD_HANDLER(r12, system_call_entry_direct) ; \
mtlr r12 ; \
mtctr r12 ; \
mfspr r12,SPRN_SRR1 ; \
/* Re-use of r13... No spare regs to do this */ \
li r13,MSR_RI ; \
mtmsrd r13,1 ; \
GET_PACA(r13) ; /* get r13 back */ \
blr ;
bctr ;
#else
/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT \

View File

@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/hvcall.h>
#include <asm/hvcserver.h>
@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
= (unsigned int)last_p_partition_ID;
/* copy the Null-term char too */
strncpy(&next_partner_info->location_code[0],
strlcpy(&next_partner_info->location_code[0],
(char *)&pi_buff[2],
strlen((char *)&pi_buff[2]) + 1);
sizeof(next_partner_info->location_code));
list_add_tail(&(next_partner_info->node), head);
next_partner_info = NULL;

View File

@ -404,6 +404,8 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
return;
}
spin_lock_init(&pc_host->cfgspace_lock);
pc->host_controller = pc_host;
pc_host->pci_controller.io_resource = &pc_host->io_resource;
pc_host->pci_controller.mem_resource = &pc_host->mem_resource;

View File

@ -74,8 +74,10 @@ static struct usb_device_id ath3k_table[] = {
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x3004) },
{ USB_DEVICE(0x0CF3, 0x3008) },
{ USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
{ USB_DEVICE(0x04CA, 0x3006) },
{ USB_DEVICE(0x04CA, 0x3008) },
@ -106,8 +108,10 @@ static struct usb_device_id ath3k_blist_tbl[] = {
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },

View File

@ -132,8 +132,10 @@ static struct usb_device_id blacklist_table[] = {
/* Atheros 3012 with sflash firmware */
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },

View File

@ -40,6 +40,7 @@
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
@ -52,8 +53,12 @@ static struct hwrng *current_rng;
static LIST_HEAD(rng_list);
static DEFINE_MUTEX(rng_mutex);
static int data_avail;
static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
__cacheline_aligned;
static u8 *rng_buffer;
static size_t rng_buffer_size(void)
{
return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}
static inline int hwrng_init(struct hwrng *rng)
{
@ -116,7 +121,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
if (!data_avail) {
bytes_read = rng_get_data(current_rng, rng_buffer,
sizeof(rng_buffer),
rng_buffer_size(),
!(filp->f_flags & O_NONBLOCK));
if (bytes_read < 0) {
err = bytes_read;
@ -307,6 +312,14 @@ int hwrng_register(struct hwrng *rng)
mutex_lock(&rng_mutex);
/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
err = -ENOMEM;
if (!rng_buffer) {
rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
if (!rng_buffer)
goto out_unlock;
}
/* Must not register two RNGs with the same name. */
err = -EEXIST;
list_for_each_entry(tmp, &rng_list, list) {

View File

@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
(task_active_pid_ns(current) != &init_pid_ns))
return;
/* Can only change if privileged. */
if (!capable(CAP_NET_ADMIN)) {
err = EPERM;
goto out;
}
mc_op = (enum proc_cn_mcast_op *)msg->data;
switch (*mc_op) {
case PROC_CN_MCAST_LISTEN:
@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
err = EINVAL;
break;
}
out:
cn_proc_ack(err, msg->seq, msg->ack);
}

View File

@ -128,9 +128,9 @@ static int ichx_read_bit(int reg, unsigned nr)
return data & (1 << bit) ? 1 : 0;
}
static int ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
{
return (ichx_priv.use_gpio & (1 << (nr / 32))) ? 0 : -ENXIO;
return ichx_priv.use_gpio & (1 << (nr / 32));
}
static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)

View File

@ -88,13 +88,14 @@ static int gpiod_request(struct gpio_desc *desc, const char *label);
static void gpiod_free(struct gpio_desc *desc);
static int gpiod_direction_input(struct gpio_desc *desc);
static int gpiod_direction_output(struct gpio_desc *desc, int value);
static int gpiod_get_direction(const struct gpio_desc *desc);
static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
static int gpiod_get_value_cansleep(struct gpio_desc *desc);
static int gpiod_get_value_cansleep(const struct gpio_desc *desc);
static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
static int gpiod_get_value(struct gpio_desc *desc);
static int gpiod_get_value(const struct gpio_desc *desc);
static void gpiod_set_value(struct gpio_desc *desc, int value);
static int gpiod_cansleep(struct gpio_desc *desc);
static int gpiod_to_irq(struct gpio_desc *desc);
static int gpiod_cansleep(const struct gpio_desc *desc);
static int gpiod_to_irq(const struct gpio_desc *desc);
static int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
static int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc);
@ -171,12 +172,12 @@ static int gpio_ensure_requested(struct gpio_desc *desc)
return 0;
}
/* caller holds gpio_lock *OR* gpio is marked as requested */
static struct gpio_chip *gpiod_to_chip(struct gpio_desc *desc)
static struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
return desc->chip;
return desc ? desc->chip : NULL;
}
/* caller holds gpio_lock *OR* gpio is marked as requested */
struct gpio_chip *gpio_to_chip(unsigned gpio)
{
return gpiod_to_chip(gpio_to_desc(gpio));
@ -207,7 +208,7 @@ static int gpiochip_find_base(int ngpio)
}
/* caller ensures gpio is valid and requested, chip->get_direction may sleep */
static int gpiod_get_direction(struct gpio_desc *desc)
static int gpiod_get_direction(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
unsigned offset;
@ -223,11 +224,13 @@ static int gpiod_get_direction(struct gpio_desc *desc)
if (status > 0) {
/* GPIOF_DIR_IN, or other positive */
status = 1;
clear_bit(FLAG_IS_OUT, &desc->flags);
/* FLAG_IS_OUT is just a cache of the result of get_direction(),
* so it does not affect constness per se */
clear_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
}
if (status == 0) {
/* GPIOF_DIR_OUT */
set_bit(FLAG_IS_OUT, &desc->flags);
set_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
}
return status;
}
@ -263,7 +266,7 @@ static DEFINE_MUTEX(sysfs_lock);
static ssize_t gpio_direction_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gpio_desc *desc = dev_get_drvdata(dev);
const struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
@ -654,6 +657,11 @@ static ssize_t export_store(struct class *class,
goto done;
desc = gpio_to_desc(gpio);
/* reject invalid GPIOs */
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}
/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done on behalf of userspace, so
@ -690,12 +698,14 @@ static ssize_t unexport_store(struct class *class,
if (status < 0)
goto done;
status = -EINVAL;
desc = gpio_to_desc(gpio);
/* reject bogus commands (gpio_unexport ignores them) */
if (!desc)
goto done;
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}
status = -EINVAL;
/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done on behalf of userspace, so
@ -846,8 +856,10 @@ static int gpiod_export_link(struct device *dev, const char *name,
{
int status = -EINVAL;
if (!desc)
goto done;
if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
return -EINVAL;
}
mutex_lock(&sysfs_lock);
@ -865,7 +877,6 @@ static int gpiod_export_link(struct device *dev, const char *name,
mutex_unlock(&sysfs_lock);
done:
if (status)
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
status);
@ -896,8 +907,10 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
struct device *dev = NULL;
int status = -EINVAL;
if (!desc)
goto done;
if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
return -EINVAL;
}
mutex_lock(&sysfs_lock);
@ -914,7 +927,6 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
unlock:
mutex_unlock(&sysfs_lock);
done:
if (status)
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
status);
@ -940,8 +952,8 @@ static void gpiod_unexport(struct gpio_desc *desc)
struct device *dev = NULL;
if (!desc) {
status = -EINVAL;
goto done;
pr_warn("%s: invalid GPIO\n", __func__);
return;
}
mutex_lock(&sysfs_lock);
@ -962,7 +974,7 @@ static void gpiod_unexport(struct gpio_desc *desc)
device_unregister(dev);
put_device(dev);
}
done:
if (status)
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
status);
@ -1384,12 +1396,13 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
int status = -EPROBE_DEFER;
unsigned long flags;
if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
return -EINVAL;
}
spin_lock_irqsave(&gpio_lock, flags);
if (!desc) {
status = -EINVAL;
goto done;
}
chip = desc->chip;
if (chip == NULL)
goto done;
@ -1432,8 +1445,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
done:
if (status)
pr_debug("_gpio_request: gpio-%d (%s) status %d\n",
desc ? desc_to_gpio(desc) : -1,
label ? : "?", status);
desc_to_gpio(desc), label ? : "?", status);
spin_unlock_irqrestore(&gpio_lock, flags);
return status;
}
@ -1616,10 +1628,13 @@ static int gpiod_direction_input(struct gpio_desc *desc)
int status = -EINVAL;
int offset;
if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
return -EINVAL;
}
spin_lock_irqsave(&gpio_lock, flags);
if (!desc)
goto fail;
chip = desc->chip;
if (!chip || !chip->get || !chip->direction_input)
goto fail;
@ -1655,13 +1670,9 @@ lose:
return status;
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
if (status) {
int gpio = -1;
if (desc)
gpio = desc_to_gpio(desc);
pr_debug("%s: gpio-%d status %d\n",
__func__, gpio, status);
}
if (status)
pr_debug("%s: gpio-%d status %d\n", __func__,
desc_to_gpio(desc), status);
return status;
}
@ -1678,6 +1689,11 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
int status = -EINVAL;
int offset;
if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
return -EINVAL;
}
/* Open drain pin should not be driven to 1 */
if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags))
return gpiod_direction_input(desc);
@ -1688,8 +1704,6 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
spin_lock_irqsave(&gpio_lock, flags);
if (!desc)
goto fail;
chip = desc->chip;
if (!chip || !chip->set || !chip->direction_output)
goto fail;
@ -1725,13 +1739,9 @@ lose:
return status;
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
if (status) {
int gpio = -1;
if (desc)
gpio = desc_to_gpio(desc);
pr_debug("%s: gpio-%d status %d\n",
__func__, gpio, status);
}
if (status)
pr_debug("%s: gpio-%d status %d\n", __func__,
desc_to_gpio(desc), status);
return status;
}
@ -1753,10 +1763,13 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
int status = -EINVAL;
int offset;
if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
return -EINVAL;
}
spin_lock_irqsave(&gpio_lock, flags);
if (!desc)
goto fail;
chip = desc->chip;
if (!chip || !chip->set || !chip->set_debounce)
goto fail;
@ -1776,13 +1789,9 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
if (status) {
int gpio = -1;
if (desc)
gpio = desc_to_gpio(desc);
pr_debug("%s: gpio-%d status %d\n",
__func__, gpio, status);
}
if (status)
pr_debug("%s: gpio-%d status %d\n", __func__,
desc_to_gpio(desc), status);
return status;
}
@ -1824,12 +1833,14 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
* It returns the zero or nonzero value provided by the associated
* gpio_chip.get() method; or zero if no such method is provided.
*/
static int gpiod_get_value(struct gpio_desc *desc)
static int gpiod_get_value(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int value;
int offset;
if (!desc)
return 0;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
/* Should be using gpio_get_value_cansleep() */
@ -1912,6 +1923,8 @@ static void gpiod_set_value(struct gpio_desc *desc, int value)
{
struct gpio_chip *chip;
if (!desc)
return;
chip = desc->chip;
/* Should be using gpio_set_value_cansleep() */
WARN_ON(chip->can_sleep);
@ -1938,8 +1951,10 @@ EXPORT_SYMBOL_GPL(__gpio_set_value);
* This is used directly or indirectly to implement gpio_cansleep(). It
* returns nonzero if access reading or writing the GPIO value can sleep.
*/
static int gpiod_cansleep(struct gpio_desc *desc)
static int gpiod_cansleep(const struct gpio_desc *desc)
{
if (!desc)
return 0;
/* only call this on GPIOs that are valid! */
return desc->chip->can_sleep;
}
@ -1959,11 +1974,13 @@ EXPORT_SYMBOL_GPL(__gpio_cansleep);
* It returns the number of the IRQ signaled by this (input) GPIO,
* or a negative errno.
*/
static int gpiod_to_irq(struct gpio_desc *desc)
static int gpiod_to_irq(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int offset;
if (!desc)
return -EINVAL;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
@ -1980,13 +1997,15 @@ EXPORT_SYMBOL_GPL(__gpio_to_irq);
* Common examples include ones connected to I2C or SPI chips.
*/
static int gpiod_get_value_cansleep(struct gpio_desc *desc)
static int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int value;
int offset;
might_sleep_if(extra_checks);
if (!desc)
return 0;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
value = chip->get ? chip->get(chip, offset) : 0;
@ -2005,6 +2024,8 @@ static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
struct gpio_chip *chip;
might_sleep_if(extra_checks);
if (!desc)
return;
chip = desc->chip;
trace_gpio_value(desc_to_gpio(desc), 0, value);
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))

View File

@ -294,13 +294,13 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
// Allocate URBs and buffers for interrupt endpoint
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
return -ENOMEM;
goto err1;
}
intr->urb = urb;
buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL);
if (!buf) {
return -ENOMEM;
goto err2;
}
endpoint = &altsetting->endpoint[EP_INT-1];
@ -313,6 +313,14 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
endpoint->desc.bInterval);
return 0;
err2:
usb_free_urb(intr->urb);
intr->urb = NULL;
err1:
usb_free_urb(ctrl->urb);
ctrl->urb = NULL;
return -ENOMEM;
}
/*

View File

@ -154,17 +154,6 @@ config MD_RAID456
If unsure, say Y.
config MULTICORE_RAID456
bool "RAID-4/RAID-5/RAID-6 Multicore processing (EXPERIMENTAL)"
depends on MD_RAID456
depends on SMP
depends on EXPERIMENTAL
---help---
Enable the raid456 module to dispatch per-stripe raid operations to a
thread pool.
If unsure, say N.
config MD_MULTIPATH
tristate "Multipath I/O support"
depends on BLK_DEV_MD

View File

@ -91,15 +91,44 @@ static struct raid_type {
{"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
};
static char *raid10_md_layout_to_format(int layout)
{
/*
* Bit 16 and 17 stand for "offset" and "use_far_sets"
* Refer to MD's raid10.c for details
*/
if ((layout & 0x10000) && (layout & 0x20000))
return "offset";
if ((layout & 0xFF) > 1)
return "near";
return "far";
}
static unsigned raid10_md_layout_to_copies(int layout)
{
return layout & 0xFF;
if ((layout & 0xFF) > 1)
return layout & 0xFF;
return (layout >> 8) & 0xFF;
}
static int raid10_format_to_md_layout(char *format, unsigned copies)
{
/* 1 "far" copy, and 'copies' "near" copies */
return (1 << 8) | (copies & 0xFF);
unsigned n = 1, f = 1;
if (!strcmp("near", format))
n = copies;
else
f = copies;
if (!strcmp("offset", format))
return 0x30000 | (f << 8) | n;
if (!strcmp("far", format))
return 0x20000 | (f << 8) | n;
return (f << 8) | n;
}
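As a worked example of this encoding (an illustrative sketch, not part of the
patch; shown with userspace assert() for brevity):

	int layout = raid10_format_to_md_layout("offset", 2);
	/* layout == 0x30201: bits 16 and 17 set, far_copies == 2, near_copies == 1 */
	assert(!strcmp(raid10_md_layout_to_format(layout), "offset"));
	assert(raid10_md_layout_to_copies(layout) == 2);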
static struct raid_type *get_raid_type(char *name)
@ -352,6 +381,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
{
unsigned i, rebuild_cnt = 0;
unsigned rebuilds_per_group, copies, d;
unsigned group_size, last_group_start;
for (i = 0; i < rs->md.raid_disks; i++)
if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
@ -379,9 +409,6 @@ static int validate_raid_redundancy(struct raid_set *rs)
* as long as the failed devices occur in different mirror
* groups (i.e. different stripes).
*
* Right now, we only allow for "near" copies. When other
* formats are added, we will have to check those too.
*
* When checking "near" format, make sure no adjacent devices
* have failed beyond what can be handled. In addition to the
* simple case where the number of devices is a multiple of the
@ -391,14 +418,41 @@ static int validate_raid_redundancy(struct raid_set *rs)
* A A B B C
* C D D E E
*/
for (i = 0; i < rs->md.raid_disks * copies; i++) {
if (!(i % copies))
if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
for (i = 0; i < rs->md.raid_disks * copies; i++) {
if (!(i % copies))
rebuilds_per_group = 0;
d = i % rs->md.raid_disks;
if ((!rs->dev[d].rdev.sb_page ||
!test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
(++rebuilds_per_group >= copies))
goto too_many;
}
break;
}
/*
* When checking "far" and "offset" formats, we need to ensure
* that the device that holds its copy is not also dead or
* being rebuilt. (Note that "far" and "offset" formats only
* support two copies right now. These formats also only ever
* use the 'use_far_sets' variant.)
*
* This check is somewhat complicated by the need to account
* for arrays that are not a multiple of (far) copies. This
* results in the need to treat the last (potentially larger)
* set differently.
*/
group_size = (rs->md.raid_disks / copies);
last_group_start = (rs->md.raid_disks / group_size) - 1;
last_group_start *= group_size;
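	/* Illustrative example (not in the original patch): 5 devices with
	 * 2 copies gives group_size = 2 and last_group_start = 2, i.e. the
	 * device sets are {0,1} and the larger final set {2,3,4}. */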
for (i = 0; i < rs->md.raid_disks; i++) {
if (!(i % copies) && !(i > last_group_start))
rebuilds_per_group = 0;
d = i % rs->md.raid_disks;
if ((!rs->dev[d].rdev.sb_page ||
!test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
if ((!rs->dev[i].rdev.sb_page ||
!test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
(++rebuilds_per_group >= copies))
goto too_many;
goto too_many;
}
break;
default:
@ -433,7 +487,7 @@ too_many:
*
* RAID10-only options:
* [raid10_copies <# copies>] Number of copies. (Default: 2)
* [raid10_format <near>] Layout algorithm. (Default: near)
* [raid10_format <near|far|offset>] Layout algorithm. (Default: near)
*/
static int parse_raid_params(struct raid_set *rs, char **argv,
unsigned num_raid_params)
@ -520,7 +574,9 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
return -EINVAL;
}
if (strcmp("near", argv[i])) {
if (strcmp("near", argv[i]) &&
strcmp("far", argv[i]) &&
strcmp("offset", argv[i])) {
rs->ti->error = "Invalid 'raid10_format' value given";
return -EINVAL;
}
@ -644,6 +700,15 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
return -EINVAL;
}
/*
* If the format is not "near", we only support
* two copies at the moment.
*/
if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
rs->ti->error = "Too many copies for given RAID10 format.";
return -EINVAL;
}
/* (Len * #mirrors) / #devices */
sectors_per_dev = rs->ti->len * raid10_copies;
sector_div(sectors_per_dev, rs->md.raid_disks);
@ -854,17 +919,30 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
/*
* Reshaping is not currently allowed
*/
if ((le32_to_cpu(sb->level) != mddev->level) ||
(le32_to_cpu(sb->layout) != mddev->layout) ||
(le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
DMERR("Reshaping arrays not yet supported.");
if (le32_to_cpu(sb->level) != mddev->level) {
DMERR("Reshaping arrays not yet supported. (RAID level change)");
return -EINVAL;
}
if (le32_to_cpu(sb->layout) != mddev->layout) {
DMERR("Reshaping arrays not yet supported. (RAID layout change)");
DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
DMERR(" Old layout: %s w/ %d copies",
raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
DMERR(" New layout: %s w/ %d copies",
raid10_md_layout_to_format(mddev->layout),
raid10_md_layout_to_copies(mddev->layout));
return -EINVAL;
}
if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
return -EINVAL;
}
/* We can only change the number of devices in RAID1 right now */
if ((rs->raid_type->level != 1) &&
(le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
DMERR("Reshaping arrays not yet supported.");
DMERR("Reshaping arrays not yet supported. (device count change)");
return -EINVAL;
}
@ -1329,7 +1407,8 @@ static void raid_status(struct dm_target *ti, status_type_t type,
raid10_md_layout_to_copies(rs->md.layout));
if (rs->print_flags & DMPF_RAID10_FORMAT)
DMEMIT(" raid10_format near");
DMEMIT(" raid10_format %s",
raid10_md_layout_to_format(rs->md.layout));
DMEMIT(" %d", rs->md.raid_disks);
for (i = 0; i < rs->md.raid_disks; i++) {
@ -1418,6 +1497,10 @@ static struct target_type raid_target = {
static int __init dm_raid_init(void)
{
DMINFO("Loading target version %u.%u.%u",
raid_target.version[0],
raid_target.version[1],
raid_target.version[2]);
return dm_register_target(&raid_target);
}

View File

@ -307,6 +307,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
bio_io_error(bio);
return;
}
if (mddev->ro == 1 && unlikely(rw == WRITE)) {
bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
return;
}
smp_rmb(); /* Ensure implications of 'active' are visible */
rcu_read_lock();
if (mddev->suspended) {
@ -2994,6 +2998,9 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
} else if (!sectors)
sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
rdev->data_offset;
if (!my_mddev->pers->resize)
/* Cannot change size for RAID0 or Linear etc */
return -EINVAL;
}
if (sectors < my_mddev->dev_sectors)
return -EINVAL; /* component must fit device */
@ -6525,7 +6532,17 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
mddev->ro = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
/* mddev_unlock will wake thread */
/* If a device failed while we were read-only, we
* need to make sure the metadata is updated now.
*/
if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
mddev_unlock(mddev);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
mddev_lock(mddev);
}
} else {
err = -EROFS;
goto abort_unlock;

View File

@ -175,7 +175,13 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
rdev1->new_raid_disk = j;
}
if (j < 0 || j >= mddev->raid_disks) {
if (j < 0) {
printk(KERN_ERR
"md/raid0:%s: remove inactive devices before converting to RAID0\n",
mdname(mddev));
goto abort;
}
if (j >= mddev->raid_disks) {
printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
"aborting!\n", mdname(mddev), j);
goto abort;
@ -289,7 +295,7 @@ abort:
kfree(conf->strip_zone);
kfree(conf->devlist);
kfree(conf);
*private_conf = NULL;
*private_conf = ERR_PTR(err);
return err;
}
@ -411,7 +417,8 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
"%s does not support generic reshape\n", __func__);
rdev_for_each(rdev, mddev)
array_sectors += rdev->sectors;
array_sectors += (rdev->sectors &
~(sector_t)(mddev->chunk_sectors-1));
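	/* e.g. (illustrative): with 128-sector chunks, a 1000-sector rdev
	 * contributes 1000 & ~127 == 896 sectors to the array size. */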
return array_sectors;
}

View File

@ -967,6 +967,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
bio_list_merge(&conf->pending_bio_list, &plug->pending);
conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread);
kfree(plug);
return;
@ -1000,6 +1001,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
const unsigned long do_discard = (bio->bi_rw
& (REQ_DISCARD | REQ_SECURE));
const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
struct raid1_plug_cb *plug = NULL;
@ -1301,7 +1303,8 @@ read_again:
conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
mbio->bi_rw =
WRITE | do_flush_fua | do_sync | do_discard | do_same;
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
@ -2818,6 +2821,9 @@ static int run(struct mddev *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
if (mddev->queue)
blk_queue_max_write_same_sectors(mddev->queue,
mddev->chunk_sectors);
rdev_for_each(rdev, mddev) {
if (!mddev->gendisk)
continue;

View File

@ -38,21 +38,36 @@
* near_copies (stored in low byte of layout)
* far_copies (stored in second byte of layout)
* far_offset (stored in bit 16 of layout )
* use_far_sets (stored in bit 17 of layout )
*
* The data to be stored is divided into chunks using chunksize.
* Each device is divided into far_copies sections.
* In each section, chunks are laid out in a style similar to raid0, but
* near_copies copies of each chunk is stored (each on a different drive).
* The starting device for each section is offset near_copies from the starting
* device of the previous section.
* Thus they are (near_copies*far_copies) of each chunk, and each is on a different
* drive.
* near_copies and far_copies must be at least one, and their product is at most
* raid_disks.
* The data to be stored is divided into chunks using chunksize. Each device
* is divided into far_copies sections. In each section, chunks are laid out
* in a style similar to raid0, but near_copies copies of each chunk is stored
* (each on a different drive). The starting device for each section is offset
* near_copies from the starting device of the previous section. Thus there
* are (near_copies * far_copies) of each chunk, and each is on a different
* drive. near_copies and far_copies must be at least one, and their product
* is at most raid_disks.
*
* If far_offset is true, then the far_copies are handled a bit differently.
* The copies are still in different stripes, but instead of be very far apart
* on disk, there are adjacent stripes.
* The copies are still in different stripes, but instead of being very far
* apart on disk, they are adjacent stripes.
*
* The far and offset algorithms are handled slightly differently if
* 'use_far_sets' is true. In this case, the array's devices are grouped into
* sets that are (near_copies * far_copies) in size. The far copied stripes
* are still shifted by 'near_copies' devices, but this shifting stays confined
* to the set rather than the entire array. This is done to improve the number
* of device combinations that can fail without causing the array to fail.
* Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
* on a device):
* A B C D A B C D E
* ... ...
* D A B C E A B C D
* Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
* [A B] [C D] [A B] [C D E]
* |...| |...| |...| | ... |
* [B A] [D C] [B A] [E C D]
*/
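/* Illustrative sketch (not kernel code): with near_copies == 1 and
 * equal-sized sets, the device holding the far copy of chunk-primary
 * device 'd' is
 *
 *	set = d / far_set_size;
 *	far = (d + 1) % far_set_size + far_set_size * set;
 *
 * With 4 disks and far_set_size == 2 this swaps 0<->1 and 2<->3,
 * reproducing the [A B] [C D] / [B A] [D C] picture above.
 */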
/*
@ -535,6 +550,13 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
sector_t stripe;
int dev;
int slot = 0;
int last_far_set_start, last_far_set_size;
last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
last_far_set_start *= geo->far_set_size;
last_far_set_size = geo->far_set_size;
last_far_set_size += (geo->raid_disks % geo->far_set_size);
/* now calculate first sector/dev */
chunk = r10bio->sector >> geo->chunk_shift;
@ -551,15 +573,25 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
/* and calculate all the others */
for (n = 0; n < geo->near_copies; n++) {
int d = dev;
int set;
sector_t s = sector;
r10bio->devs[slot].addr = sector;
r10bio->devs[slot].devnum = d;
r10bio->devs[slot].addr = s;
slot++;
for (f = 1; f < geo->far_copies; f++) {
set = d / geo->far_set_size;
d += geo->near_copies;
if (d >= geo->raid_disks)
d -= geo->raid_disks;
if ((geo->raid_disks % geo->far_set_size) &&
(d > last_far_set_start)) {
d -= last_far_set_start;
d %= last_far_set_size;
d += last_far_set_start;
} else {
d %= geo->far_set_size;
d += geo->far_set_size * set;
}
s += geo->stride;
r10bio->devs[slot].devnum = d;
r10bio->devs[slot].addr = s;
@ -595,6 +627,20 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
* or recovery, so reshape isn't happening
*/
struct geom *geo = &conf->geo;
int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
int far_set_size = geo->far_set_size;
int last_far_set_start;
if (geo->raid_disks % geo->far_set_size) {
last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
last_far_set_start *= geo->far_set_size;
if (dev >= last_far_set_start) {
far_set_size = geo->far_set_size;
far_set_size += (geo->raid_disks % geo->far_set_size);
far_set_start = last_far_set_start;
}
}
offset = sector & geo->chunk_mask;
if (geo->far_offset) {
@ -602,13 +648,13 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
chunk = sector >> geo->chunk_shift;
fc = sector_div(chunk, geo->far_copies);
dev -= fc * geo->near_copies;
if (dev < 0)
dev += geo->raid_disks;
if (dev < far_set_start)
dev += far_set_size;
} else {
while (sector >= geo->stride) {
sector -= geo->stride;
if (dev < geo->near_copies)
dev += geo->raid_disks - geo->near_copies;
if (dev < (geo->near_copies + far_set_start))
dev += far_set_size - geo->near_copies;
else
dev -= geo->near_copies;
}
@ -1073,6 +1119,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
bio_list_merge(&conf->pending_bio_list, &plug->pending);
conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread);
kfree(plug);
return;
@ -1105,6 +1152,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
const unsigned long do_discard = (bio->bi_rw
& (REQ_DISCARD | REQ_SECURE));
const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
unsigned long flags;
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
@ -1460,7 +1508,8 @@ retry_write:
rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
mbio->bi_rw =
WRITE | do_sync | do_fua | do_discard | do_same;
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@ -1502,7 +1551,8 @@ retry_write:
r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
mbio->bi_rw =
WRITE | do_sync | do_fua | do_discard | do_same;
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@ -3436,7 +3486,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
disks = mddev->raid_disks + mddev->delta_disks;
break;
}
if (layout >> 17)
if (layout >> 18)
return -1;
if (chunk < (PAGE_SIZE >> 9) ||
!is_power_of_2(chunk))
@ -3448,6 +3498,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
geo->near_copies = nc;
geo->far_copies = fc;
geo->far_offset = fo;
geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
geo->chunk_mask = chunk - 1;
geo->chunk_shift = ffz(~chunk);
return nc*fc;
@ -3569,6 +3620,8 @@ static int run(struct mddev *mddev)
if (mddev->queue) {
blk_queue_max_discard_sectors(mddev->queue,
mddev->chunk_sectors);
blk_queue_max_write_same_sectors(mddev->queue,
mddev->chunk_sectors);
blk_queue_io_min(mddev->queue, chunk_size);
if (conf->geo.raid_disks % conf->geo.near_copies)
blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);

View File

@ -33,6 +33,11 @@ struct r10conf {
* far_offset, in which case it is
* 1 stripe.
*/
int far_set_size; /* The number of devices in a set,
* where a 'set' are devices that
* contain far/offset copies of
* each other.
*/
int chunk_shift; /* shift from chunks to sectors */
sector_t chunk_mask;
} prev, geo;

View File

@ -1403,7 +1403,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
&sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
int overlap_clear = 0, i, disks = sh->disks;
struct dma_async_tx_descriptor *tx = NULL;
@ -1468,36 +1468,6 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
put_cpu();
}
#ifdef CONFIG_MULTICORE_RAID456
static void async_run_ops(void *param, async_cookie_t cookie)
{
struct stripe_head *sh = param;
unsigned long ops_request = sh->ops.request;
clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
wake_up(&sh->ops.wait_for_ops);
__raid_run_ops(sh, ops_request);
release_stripe(sh);
}
static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
/* since handle_stripe can be called outside of raid5d context
* we need to ensure sh->ops.request is de-staged before another
* request arrives
*/
wait_event(sh->ops.wait_for_ops,
!test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
sh->ops.request = ops_request;
atomic_inc(&sh->count);
async_schedule(async_run_ops, sh);
}
#else
#define raid_run_ops __raid_run_ops
#endif
static int grow_one_stripe(struct r5conf *conf)
{
struct stripe_head *sh;
@ -1506,9 +1476,6 @@ static int grow_one_stripe(struct r5conf *conf)
return 0;
sh->raid_conf = conf;
#ifdef CONFIG_MULTICORE_RAID456
init_waitqueue_head(&sh->ops.wait_for_ops);
#endif
spin_lock_init(&sh->stripe_lock);
@ -1627,9 +1594,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
break;
nsh->raid_conf = conf;
#ifdef CONFIG_MULTICORE_RAID456
init_waitqueue_head(&nsh->ops.wait_for_ops);
#endif
spin_lock_init(&nsh->stripe_lock);
list_add(&nsh->lru, &newstripes);

View File

@ -1629,7 +1629,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
if (bond->dev_addr_from_first)
if (bond->slave_cnt == 0 && bond->dev_addr_from_first)
bond_set_dev_addr(bond->dev, slave_dev);
new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);

View File

@ -301,12 +301,16 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
ring->start);
} else {
/* Omit CRC. */
len -= ETH_FCS_LEN;
new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
if (new_skb) {
skb_put(new_skb, len);
skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
new_skb->data,
len);
skb_checksum_none_assert(skb);
new_skb->protocol =
eth_type_trans(new_skb, bgmac->net_dev);
netif_receive_skb(new_skb);

View File

@ -3142,7 +3142,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
tsum = ~csum_fold(csum_add((__force __wsum) csum,
csum_partial(t_header, -fix, 0)));
return bswab16(csum);
return bswab16(tsum);
}
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)

View File

@ -281,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
}
cmd->maxtxpkt = 0;
@ -463,6 +465,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ADVERTISED_10000baseKR_Full))
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
}
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */

View File

@ -10422,6 +10422,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x0);
if (phy->type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
/* Disable MI_INT interrupt before setting LED4
* source to constant off.
*/
if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
params->port*4) &
NIG_MASK_MI_INT) {
params->link_flags |=
LINK_FLAGS_INT_DISABLED;
bnx2x_bits_dis(
bp,
NIG_REG_MASK_INTERRUPT_PORT0 +
params->port*4,
NIG_MASK_MI_INT);
}
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_SIGNAL_MASK,
0x0);
}
}
break;
case LED_MODE_ON:
@ -10468,6 +10490,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x20);
if (phy->type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
/* Disable MI_INT interrupt before setting LED4
* source to constant on.
*/
if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
params->port*4) &
NIG_MASK_MI_INT) {
params->link_flags |=
LINK_FLAGS_INT_DISABLED;
bnx2x_bits_dis(
bp,
NIG_REG_MASK_INTERRUPT_PORT0 +
params->port*4,
NIG_MASK_MI_INT);
}
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_SIGNAL_MASK,
0x20);
}
}
break;
@ -10532,6 +10576,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL,
val);
if (phy->type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
/* Restore LED4 source to external link,
* and re-enable interrupts.
*/
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_SIGNAL_MASK,
0x40);
if (params->link_flags &
LINK_FLAGS_INT_DISABLED) {
bnx2x_link_int_enable(params);
params->link_flags &=
~LINK_FLAGS_INT_DISABLED;
}
}
}
break;
}
@ -11791,6 +11851,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
phy->media_type = ETH_PHY_KR;
phy->flags |= FLAGS_WC_DUAL_MODE;
phy->supported &= (SUPPORTED_20000baseKR2_Full |
SUPPORTED_10000baseT_Full |
SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_FIBRE |
SUPPORTED_Pause |
@ -13437,7 +13499,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
struct bnx2x_phy *phy = &params->phy[INT_PHY];
bnx2x_set_aer_mmd(params, phy);
if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
(phy->speed_cap_mask & SPEED_20000))
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
bnx2x_check_kr2_wa(params, vars, phy);
bnx2x_check_over_curr(params, vars);
if (vars->rx_tx_asic_rst)

View File

@ -307,7 +307,8 @@ struct link_params {
struct bnx2x *bp;
u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
req_flow_ctrl is set to AUTO */
u16 rsrv1;
u16 link_flags;
#define LINK_FLAGS_INT_DISABLED (1<<0)
u32 lfa_base;
};

View File

@ -349,6 +349,7 @@ struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
u8 __iomem *db; /* Door Bell */
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */

View File

@ -473,19 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
return 0;
}
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
static u16 be_POST_stage_get(struct be_adapter *adapter)
{
u32 sem;
u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
SLIPORT_SEMAPHORE_OFFSET_BE;
pci_read_config_dword(adapter->pdev, reg, &sem);
*stage = sem & POST_STAGE_MASK;
if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
return -1;
if (BEx_chip(adapter))
sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
else
return 0;
pci_read_config_dword(adapter->pdev,
SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
return sem & POST_STAGE_MASK;
}
int lancer_wait_ready(struct be_adapter *adapter)
@ -579,19 +577,17 @@ int be_fw_wait_ready(struct be_adapter *adapter)
}
do {
status = be_POST_stage_get(adapter, &stage);
if (status) {
dev_err(dev, "POST error; stage=0x%x\n", stage);
return -1;
} else if (stage != POST_STAGE_ARMFW_RDY) {
if (msleep_interruptible(2000)) {
dev_err(dev, "Waiting for POST aborted\n");
return -EINTR;
}
timeout += 2;
} else {
stage = be_POST_stage_get(adapter);
if (stage == POST_STAGE_ARMFW_RDY)
return 0;
dev_info(dev, "Waiting for POST, %ds elapsed\n",
timeout);
if (msleep_interruptible(2000)) {
dev_err(dev, "Waiting for POST aborted\n");
return -EINTR;
}
timeout += 2;
} while (timeout < 60);
dev_err(dev, "POST timeout; stage=0x%x\n", stage);

View File

@ -32,8 +32,8 @@
#define MPU_EP_CONTROL 0
/********** MPU semphore: used for SH & BE *************/
#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c
#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94
#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
#define POST_STAGE_MASK 0x0000FFFF
#define POST_ERR_MASK 0x1
#define POST_ERR_SHIFT 31

View File

@ -3688,6 +3688,8 @@ static void be_netdev_init(struct net_device *netdev)
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
if (adapter->csr)
pci_iounmap(adapter->pdev, adapter->csr);
if (adapter->db)
pci_iounmap(adapter->pdev, adapter->db);
}
@ -3721,6 +3723,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
SLI_INTF_IF_TYPE_SHIFT;
if (BEx_chip(adapter) && be_physfn(adapter)) {
adapter->csr = pci_iomap(adapter->pdev, 2, 0);
if (adapter->csr == NULL)
return -ENOMEM;
}
addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
if (addr == NULL)
goto pci_map_err;
@ -4329,6 +4337,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
/* Check if card is ok and fw is ready */
dev_info(&adapter->pdev->dev,
"Waiting for FW to be ready after EEH reset\n");
status = be_fw_wait_ready(adapter);
if (status)
return PCI_ERS_RESULT_DISCONNECT;

View File

@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct bufdesc *bdp;
void *bufaddr;
unsigned short status;
unsigned long flags;
unsigned int index;
if (!fep->link) {
/* Link is down or autonegotiation is in progress. */
return NETDEV_TX_BUSY;
}
spin_lock_irqsave(&fep->hw_lock, flags);
/* Fill in a Tx ring entry */
bdp = fep->cur_tx;
@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* This should not happen, since ndev->tbusy should be set.
*/
printk("%s: tx queue full!.\n", ndev->name);
spin_unlock_irqrestore(&fep->hw_lock, flags);
return NETDEV_TX_BUSY;
}
@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* 4-byte boundaries. Use bounce buffers to copy data
* and get it aligned. Ugh.
*/
if (fep->bufdesc_ex)
index = (struct bufdesc_ex *)bdp -
(struct bufdesc_ex *)fep->tx_bd_base;
else
index = bdp - fep->tx_bd_base;
if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
unsigned int index;
if (fep->bufdesc_ex)
index = (struct bufdesc_ex *)bdp -
(struct bufdesc_ex *)fep->tx_bd_base;
else
index = bdp - fep->tx_bd_base;
memcpy(fep->tx_bounce[index], skb->data, skb->len);
bufaddr = fep->tx_bounce[index];
}
@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
swap_buffer(bufaddr, skb->len);
/* Save skb pointer */
fep->tx_skbuff[fep->skb_cur] = skb;
ndev->stats.tx_bytes += skb->len;
fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
fep->tx_skbuff[index] = skb;
/* Push the data cache so the CPM does not get stale memory
* data.
@ -331,25 +326,21 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_TX_INT;
}
}
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
/* If this was the last BD in the ring, start at the beginning again. */
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
if (bdp == fep->dirty_tx) {
fep->tx_full = 1;
netif_stop_queue(ndev);
}
fep->cur_tx = bdp;
skb_tx_timestamp(skb);
if (fep->cur_tx == fep->dirty_tx)
netif_stop_queue(ndev);
spin_unlock_irqrestore(&fep->hw_lock, flags);
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
skb_tx_timestamp(skb);
return NETDEV_TX_OK;
}
@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
fep->cur_rx = fep->rx_bd_base;
/* Reset SKB transmit buffers. */
fep->skb_cur = fep->skb_dirty = 0;
for (i = 0; i <= TX_RING_MOD_MASK; i++) {
if (fep->tx_skbuff[i]) {
dev_kfree_skb_any(fep->tx_skbuff[i]);
@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
int index = 0;
fep = netdev_priv(ndev);
spin_lock(&fep->hw_lock);
bdp = fep->dirty_tx;
/* get next bdp of dirty_tx */
if (bdp->cbd_sc & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
if (bdp == fep->cur_tx && fep->tx_full == 0)
/* current queue is empty */
if (bdp == fep->cur_tx)
break;
if (fep->bufdesc_ex)
index = (struct bufdesc_ex *)bdp -
(struct bufdesc_ex *)fep->tx_bd_base;
else
index = bdp - fep->tx_bd_base;
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
bdp->cbd_bufaddr = 0;
skb = fep->tx_skbuff[fep->skb_dirty];
skb = fep->tx_skbuff[index];
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
BD_ENET_TX_RL | BD_ENET_TX_UN |
@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
fep->tx_skbuff[fep->skb_dirty] = NULL;
fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
fep->tx_skbuff[index] = NULL;
fep->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
if (status & BD_ENET_TX_WRAP)
@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
/* Since we have freed up a buffer, the ring is no longer full
*/
if (fep->tx_full) {
fep->tx_full = 0;
if (fep->dirty_tx != fep->cur_tx) {
if (netif_queue_stopped(ndev))
netif_wake_queue(ndev);
}
}
fep->dirty_tx = bdp;
spin_unlock(&fep->hw_lock);
return;
}
@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
int_events = readl(fep->hwp + FEC_IEVENT);
writel(int_events, fep->hwp + FEC_IEVENT);
if (int_events & FEC_ENET_RXF) {
if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
ret = IRQ_HANDLED;
/* Disable the RX interrupt */
@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
}
}
/* Transmit OK, or non-fatal error. Update the buffer
* descriptors. FEC handles all errors, we just discover
* them as part of the transmit process.
*/
if (int_events & FEC_ENET_TXF) {
ret = IRQ_HANDLED;
fec_enet_tx(ndev);
}
if (int_events & FEC_ENET_MII) {
ret = IRQ_HANDLED;
complete(&fep->mdio_done);
@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
int pkts = fec_enet_rx(ndev, budget);
struct fec_enet_private *fep = netdev_priv(ndev);
fec_enet_tx(ndev);
if (pkts < budget) {
napi_complete(napi);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@ -1646,6 +1641,7 @@ static int fec_enet_init(struct net_device *ndev)
/* ...and the same for transmit */
bdp = fep->tx_bd_base;
fep->cur_tx = bdp;
for (i = 0; i < TX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
@ -1657,6 +1653,7 @@ static int fec_enet_init(struct net_device *ndev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
fep->dirty_tx = bdp;
fec_restart(ndev, 0);
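Note: the start_xmit and completion hunks above retire the tx_full flag and the skb_cur/skb_dirty counters: the queue is stopped when advancing cur_tx lands on dirty_tx, woken when dirty_tx moves away again, and the skb slot is recovered by descriptor pointer arithmetic. A compilable userspace model of that bookkeeping (a sketch; names are illustrative, not the driver's):

#include <stddef.h>

#define RING_SIZE 8

struct bd { int filled; };

struct ring {
	struct bd bd[RING_SIZE];
	struct bd *cur;   /* next descriptor to fill, like cur_tx */
	struct bd *dirty; /* last descriptor reclaimed, like dirty_tx */
};

/* wrap at the end of the array, as the BD_ENET_TX_WRAP test does */
static struct bd *next_bd(struct ring *r, struct bd *bdp)
{
	return (bdp == &r->bd[RING_SIZE - 1]) ? r->bd : bdp + 1;
}

/* slot number by pointer subtraction, as the reworked fec_enet_tx()
 * does instead of the removed skb_cur/skb_dirty counters
 */
static size_t bd_index(const struct ring *r, const struct bd *bdp)
{
	return (size_t)(bdp - r->bd);
}

/* producer step: advance cur and report "stop the queue" when it
 * catches dirty, mirroring the cur_tx == dirty_tx test above
 */
static int advance_and_check_full(struct ring *r)
{
	r->cur = next_bd(r, r->cur);
	return r->cur == r->dirty;
}

int main(void)
{
	struct ring r;
	int i, full = 0;

	r.cur = r.dirty = r.bd;
	for (i = 0; i < RING_SIZE && !full; i++)
		full = advance_and_check_full(&r);

	/* the ring reports full after wrapping back onto dirty */
	return (full && bd_index(&r, r.cur) == 0) ? 0 : 1;
}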

View File

@ -97,6 +97,13 @@ struct bufdesc {
unsigned short cbd_sc; /* Control and status info */
unsigned long cbd_bufaddr; /* Buffer address */
};
#else
struct bufdesc {
unsigned short cbd_sc; /* Control and status info */
unsigned short cbd_datlen; /* Data length */
unsigned long cbd_bufaddr; /* Buffer address */
};
#endif
struct bufdesc_ex {
struct bufdesc desc;
@ -107,14 +114,6 @@ struct bufdesc_ex {
unsigned short res0[4];
};
#else
struct bufdesc {
unsigned short cbd_sc; /* Control and status info */
unsigned short cbd_datlen; /* Data length */
unsigned long cbd_bufaddr; /* Buffer address */
};
#endif
/*
* The following definitions courtesy of commproc.h, which were
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
@ -214,8 +213,6 @@ struct fec_enet_private {
unsigned char *tx_bounce[TX_RING_SIZE];
struct sk_buff *tx_skbuff[TX_RING_SIZE];
struct sk_buff *rx_skbuff[RX_RING_SIZE];
ushort skb_cur;
ushort skb_dirty;
/* CPM dual port RAM relative addresses */
dma_addr_t bd_dma;
@ -227,7 +224,6 @@ struct fec_enet_private {
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
uint tx_full;
/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
spinlock_t hw_lock;

View File

@ -781,6 +781,59 @@ release:
return ret_val;
}
/**
* e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
* @hw: pointer to the HW structure
* @link: link up bool flag
*
* When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications,
* preventing further DMA write requests. Work around the issue by disabling
* the de-assertion of the clock request when in 1Gbps mode.
**/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
u32 fextnvm6 = er32(FEXTNVM6);
s32 ret_val = 0;
if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
u16 kmrn_reg;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
ret_val =
e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
&kmrn_reg);
if (ret_val)
goto release;
ret_val =
e1000e_write_kmrn_reg_locked(hw,
E1000_KMRNCTRLSTA_K1_CONFIG,
kmrn_reg &
~E1000_KMRNCTRLSTA_K1_ENABLE);
if (ret_val)
goto release;
usleep_range(10, 20);
ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
ret_val =
e1000e_write_kmrn_reg_locked(hw,
E1000_KMRNCTRLSTA_K1_CONFIG,
kmrn_reg);
release:
hw->phy.ops.release(hw);
} else {
/* clear FEXTNVM6 bit 8 on link down or 10/100 */
ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
}
return ret_val;
}
/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
}
/* Work-around I218 hang issue */
if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
(hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
ret_val = e1000_k1_workaround_lpt_lp(hw, link);
if (ret_val)
return ret_val;
}
/* Clear link partner's EEE ability */
hw->dev_spec.ich8lan.eee_lp_ability = 0;
@ -3954,8 +4015,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
phy_ctrl = er32(PHY_CTRL);
phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
if (hw->phy.type == e1000_phy_i217) {
u16 phy_reg;
u16 phy_reg, device_id = hw->adapter->pdev->device;
if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
(device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
u32 fextnvm6 = er32(FEXTNVM6);
ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
}
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)

View File

@ -92,6 +92,8 @@
#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7

View File

@ -42,6 +42,7 @@
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */

View File

@ -1361,11 +1361,16 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
switch (hw->phy.type) {
case e1000_phy_i210:
case e1000_phy_m88:
if (hw->phy.id == I347AT4_E_PHY_ID ||
hw->phy.id == M88E1112_E_PHY_ID)
switch (hw->phy.id) {
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case I210_I_PHY_ID:
ret_val = igb_copper_link_setup_m88_gen2(hw);
else
break;
default:
ret_val = igb_copper_link_setup_m88(hw);
break;
}
break;
case e1000_phy_igp_3:
ret_val = igb_copper_link_setup_igp(hw);

View File

@ -447,7 +447,7 @@ struct igb_adapter {
#endif
struct i2c_algo_bit_data i2c_algo;
struct i2c_adapter i2c_adap;
struct igb_i2c_client_list *i2c_clients;
struct i2c_client *i2c_client;
};
#define IGB_FLAG_HAS_MSI (1 << 0)

View File

@ -39,6 +39,10 @@
#include <linux/pci.h>
#ifdef CONFIG_IGB_HWMON
struct i2c_board_info i350_sensor_info = {
I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
};
/* hwmon callback functions */
static ssize_t igb_hwmon_show_location(struct device *dev,
struct device_attribute *attr,
@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
unsigned int i;
int n_attrs;
int rc = 0;
struct i2c_client *client = NULL;
/* If this method isn't defined we don't support thermals */
if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter)
if (rc)
goto exit;
/* init i2c_client */
client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
if (client == NULL) {
dev_info(&adapter->pdev->dev,
"Failed to create new i2c device..\n");
goto exit;
}
adapter->i2c_client = client;
/* Allocation space for max attributes
* max num sensors * values (loc, temp, max, caution)
*/
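Note: I2C addresses appear here in two conventions: the hardware tables carry the 8-bit, left-shifted form 0xf8 (with the R/W bit in the low position), while i2c_new_device() expects the 7-bit form, hence the >> 1 in the board info above and in the igb_get_i2c_client() helper removed further down. A standalone illustration (values taken from this diff):

#include <stdio.h>

int main(void)
{
	unsigned int addr_8bit = 0xf8;           /* as the hw tables store it */
	unsigned int addr_7bit = addr_8bit >> 1; /* as the i2c core wants it */

	printf("8-bit 0x%02x -> 7-bit 0x%02x\n", addr_8bit, addr_7bit);
	return 0; /* prints: 8-bit 0xf8 -> 7-bit 0x7c */
}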

View File

@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
return;
}
static const struct i2c_board_info i350_sensor_info = {
I2C_BOARD_INFO("i350bb", 0Xf8),
};
/* igb_init_i2c - Init I2C interface
* @adapter: pointer to adapter structure
*
@ -6227,13 +6223,6 @@ static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
/* If we spanned a buffer we have a huge mess so test for it */
BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
/* Guarantee this function can be used by verifying buffer sizes */
BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
NET_IP_ALIGN +
IGB_TS_HDR_LEN +
ETH_FRAME_LEN +
ETH_FCS_LEN));
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);
@ -7724,67 +7713,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
}
}
static DEFINE_SPINLOCK(i2c_clients_lock);
/* igb_get_i2c_client - returns matching client
* in adapters's client list.
* @adapter: adapter struct
* @dev_addr: device address of i2c needed.
*/
static struct i2c_client *
igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
{
ulong flags;
struct igb_i2c_client_list *client_list;
struct i2c_client *client = NULL;
struct i2c_board_info client_info = {
I2C_BOARD_INFO("igb", 0x00),
};
spin_lock_irqsave(&i2c_clients_lock, flags);
client_list = adapter->i2c_clients;
/* See if we already have an i2c_client */
while (client_list) {
if (client_list->client->addr == (dev_addr >> 1)) {
client = client_list->client;
goto exit;
} else {
client_list = client_list->next;
}
}
/* no client_list found, create a new one */
client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
if (client_list == NULL)
goto exit;
/* dev_addr passed to us is left-shifted by 1 bit
* i2c_new_device call expects it to be flush to the right.
*/
client_info.addr = dev_addr >> 1;
client_info.platform_data = adapter;
client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info);
if (client_list->client == NULL) {
dev_info(&adapter->pdev->dev,
"Failed to create new i2c device..\n");
goto err_no_client;
}
/* insert new client at head of list */
client_list->next = adapter->i2c_clients;
adapter->i2c_clients = client_list;
client = client_list->client;
goto exit;
err_no_client:
kfree(client_list);
exit:
spin_unlock_irqrestore(&i2c_clients_lock, flags);
return client;
}
/* igb_read_i2c_byte - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
@ -7798,7 +7726,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
struct i2c_client *this_client = adapter->i2c_client;
s32 status;
u16 swfw_mask = 0;
@ -7835,7 +7763,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
struct i2c_client *this_client = adapter->i2c_client;
s32 status;
u16 swfw_mask = E1000_SWFW_PHY0_SM;

View File

@ -4765,8 +4765,10 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
rtl_tx_performance_tweak(pdev,
(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
if (tp->dev->mtu <= ETH_DATA_LEN) {
rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
PCI_EXP_DEVCTL_NOSNOOP_EN);
}
}
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
@ -4789,7 +4791,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
rtl_disable_clock_request(pdev);
@ -4822,7 +4825,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
@ -4841,7 +4845,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, TxPacketMax);
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
@ -4901,7 +4906,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, TxPacketMax);
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
@ -4913,7 +4919,8 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
rtl_csi_access_enable_1(tp);
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W8(MaxTxPacketSize, TxPacketMax);
@ -4972,7 +4979,8 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W8(MaxTxPacketSize, TxPacketMax);
@ -4998,7 +5006,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

View File

@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
tx_queue->txd.entries);
}
efx_device_detach_sync(efx);
efx_stop_all(efx);
efx_stop_interrupts(efx, true);
@ -832,6 +833,7 @@ out:
efx_start_interrupts(efx, true);
efx_start_all(efx);
netif_device_attach(efx->net_dev);
return rc;
rollback:
@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)
/* Flush efx_mac_work(), refill_workqueue, monitor_work */
efx_flush_all(efx);
/* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */
/* Stop the kernel transmit interface. This is only valid if
* the device is stopped or detached; otherwise the watchdog
* may fire immediately.
*/
WARN_ON(netif_running(efx->net_dev) &&
netif_device_present(efx->net_dev));
netif_tx_disable(efx->net_dev);
efx_stop_datapath(efx);
@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
if (new_mtu > EFX_MAX_MTU)
return -EINVAL;
efx_stop_all(efx);
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
efx_device_detach_sync(efx);
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
net_dev->mtu = new_mtu;
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
netif_device_attach(efx->net_dev);
return 0;
}
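Note: both efx_realloc_channels() and efx_change_mtu() now detach the device before efx_stop_all() and re-attach only after efx_start_all(); that ordering is exactly what the new WARN_ON() in efx_stop_all() asserts, since netif_tx_disable() on a running, attached device could let the TX watchdog fire mid-flush.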

View File

@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
* TX scheduler is stopped when we're done and before
* netif_device_present() becomes false.
*/
netif_tx_lock(dev);
netif_tx_lock_bh(dev);
netif_device_detach(dev);
netif_tx_unlock(dev);
netif_tx_unlock_bh(dev);
}
#endif /* EFX_EFX_H */

View File

@ -210,6 +210,7 @@ struct efx_tx_queue {
* Will be %NULL if the buffer slot is currently free.
* @page: The associated page buffer. Valid iff @flags & %EFX_RX_BUF_PAGE.
* Will be %NULL if the buffer slot is currently free.
* @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
* @len: Buffer length, in bytes.
* @flags: Flags for buffer and packet state.
*/
@ -219,7 +220,8 @@ struct efx_rx_buffer {
struct sk_buff *skb;
struct page *page;
} u;
unsigned int len;
u16 page_offset;
u16 len;
u16 flags;
};
#define EFX_RX_BUF_PAGE 0x0001

View File

@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
struct efx_rx_buffer *buf)
{
/* Offset is always within one page, so we don't need to consider
* the page order.
*/
return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
efx->type->rx_buffer_hash_size;
return buf->page_offset + efx->type->rx_buffer_hash_size;
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
struct page *page;
unsigned int page_offset;
struct efx_rx_page_state *state;
dma_addr_t dma_addr;
unsigned index, count;
@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
state->dma_addr = dma_addr;
dma_addr += sizeof(struct efx_rx_page_state);
page_offset = sizeof(struct efx_rx_page_state);
split:
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
rx_buf->u.page = page;
rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
rx_buf->flags = EFX_RX_BUF_PAGE;
++rx_queue->added_count;
@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
/* Use the second half of the page */
get_page(page);
dma_addr += (PAGE_SIZE >> 1);
page_offset += (PAGE_SIZE >> 1);
++count;
goto split;
}
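Note: with the page_offset field introduced above, each receive page holds a small DMA-state header followed by up to two buffers, and the second pass through the split: label advances both the DMA address and the recorded offset by half a page. A hedged userspace model of the offset arithmetic (STATE_SIZE stands in for sizeof(struct efx_rx_page_state), which is not shown in this diff):

#include <stdio.h>

#define PAGE_SIZE         4096u
#define STATE_SIZE        64u  /* stand-in for the real state header size */
#define EFX_PAGE_IP_ALIGN 2u

int main(void)
{
	unsigned int page_offset = STATE_SIZE;

	/* first buffer in the page */
	printf("buf0 offset: %u\n", page_offset + EFX_PAGE_IP_ALIGN);

	/* second buffer: use the second half of the page */
	page_offset += PAGE_SIZE >> 1;
	printf("buf1 offset: %u\n", page_offset + EFX_PAGE_IP_ALIGN);
	return 0;
}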
@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
}
static void efx_unmap_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
struct efx_rx_buffer *rx_buf,
unsigned int used_len)
{
if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
struct efx_rx_page_state *state;
@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
state->dma_addr,
efx_rx_buf_size(efx),
DMA_FROM_DEVICE);
} else if (used_len) {
dma_sync_single_for_cpu(&efx->pci_dev->dev,
rx_buf->dma_addr, used_len,
DMA_FROM_DEVICE);
}
} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf)
{
efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
efx_free_rx_buffer(rx_queue->efx, rx_buf);
}
@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
goto out;
}
/* Release card resources - assumes all RX buffers consumed in-order
* per RX queue
/* Release and/or sync DMA mapping - assumes all RX buffers
* consumed in-order per RX queue
*/
efx_unmap_rx_buffer(efx, rx_buf);
efx_unmap_rx_buffer(efx, rx_buf, len);
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.

View File

@ -731,7 +731,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
writel(vlan, &priv->host_port_regs->port_vlan);
for (i = 0; i < 2; i++)
for (i = 0; i < priv->data.slaves; i++)
slave_write(priv->slaves + i, vlan, reg);
cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,

View File

@ -257,8 +257,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,

View File

@ -44,13 +44,13 @@ MODULE_LICENSE("GPL");
void phy_device_free(struct phy_device *phydev)
{
kfree(phydev);
put_device(&phydev->dev);
}
EXPORT_SYMBOL(phy_device_free);
static void phy_device_release(struct device *dev)
{
phy_device_free(to_phy_device(dev));
kfree(to_phy_device(dev));
}
static struct phy_driver genphy_driver;
@ -201,6 +201,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
there's no driver _already_ loaded. */
request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
device_initialize(&dev->dev);
return dev;
}
EXPORT_SYMBOL(phy_device_create);
@ -363,9 +365,9 @@ int phy_device_register(struct phy_device *phydev)
/* Run all of the fixups for this PHY */
phy_scan_fixups(phydev);
err = device_register(&phydev->dev);
err = device_add(&phydev->dev);
if (err) {
pr_err("phy %d failed to register\n", phydev->addr);
pr_err("PHY %d failed to add\n", phydev->addr);
goto out;
}
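Note: these hunks convert struct phy_device to the standard two-step device lifetime: device_initialize() at creation takes the initial reference, device_add() publishes the device, phy_device_free() becomes a plain put_device(), and the final kfree() moves into the ->release() callback so it runs only when the last reference drops. A generic sketch of that pattern (illustrative identifiers, assumes a kernel build environment):

#include <linux/device.h>
#include <linux/slab.h>

struct foo { struct device dev; };

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo, dev)); /* last put lands here */
}

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	f->dev.release = foo_release;
	device_initialize(&f->dev);   /* refcount = 1, not yet visible */
	return f;
}

static int foo_register(struct foo *f)
{
	return device_add(&f->dev);   /* publish; on error, put_device() */
}

static void foo_free(struct foo *f)
{
	put_device(&f->dev);          /* never kfree() a refcounted device */
}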

View File

@ -156,6 +156,24 @@ config USB_NET_AX8817X
This driver creates an interface named "ethX", where X depends on
what other networking devices you have in use.
config USB_NET_AX88179_178A
tristate "ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet"
depends on USB_USBNET
select CRC32
select PHYLIB
default y
help
This option adds support for ASIX AX88179 based USB 3.0/2.0
to Gigabit Ethernet adapters.
This driver should work with at least the following devices:
* ASIX AX88179
* ASIX AX88178A
* Sitecom LN-032
This driver creates an interface named "ethX", where X depends on
what other networking devices you have in use.
config USB_NET_CDCETHER
tristate "CDC Ethernet support (smart devices such as cable modems)"
depends on USB_USBNET

View File

@ -9,6 +9,7 @@ obj-$(CONFIG_USB_RTL8150) += rtl8150.o
obj-$(CONFIG_USB_HSO) += hso.o
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
asix-y := asix_devices.o asix_common.o ax88172a.o
obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o

View File

@ -924,6 +924,29 @@ static const struct driver_info ax88178_info = {
.tx_fixup = asix_tx_fixup,
};
/*
* USBLINK 20F9 "USB 2.0 LAN" USB ethernet adapter, typically found in
* no-name packaging.
* USB device strings are:
* 1: Manufacturer: USBLINK
* 2: Product: HG20F9 USB2.0
* 3: Serial: 000003
* Appears to be compatible with Asix 88772B.
*/
static const struct driver_info hg20f9_info = {
.description = "HG20F9 USB 2.0 Ethernet",
.bind = ax88772_bind,
.unbind = ax88772_unbind,
.status = asix_status,
.link_reset = ax88772_link_reset,
.reset = ax88772_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
.data = FLAG_EEPROM_MAC,
};
extern const struct driver_info ax88172a_info;
static const struct usb_device_id products [] = {
@ -1063,6 +1086,14 @@ static const struct usb_device_id products [] = {
/* ASIX 88172a demo board */
USB_DEVICE(0x0b95, 0x172a),
.driver_info = (unsigned long) &ax88172a_info,
}, {
/*
* USBLINK HG20F9 "USB 2.0 LAN"
* Appears to have gazumped Linksys's manufacturer ID but
* doesn't (yet) conflict with any known Linksys product.
*/
USB_DEVICE(0x066b, 0x20f9),
.driver_info = (unsigned long) &hg20f9_info,
},
{ }, // END
};

File diff suppressed because it is too large

View File

@ -1213,6 +1213,14 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long) &wwan_info,
},
/* tag Huawei devices as wwan */
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
},
/* Huawei NCM devices disguised as vendor specific */
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
.driver_info = (unsigned long)&wwan_info,

View File

@ -961,6 +961,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
tunnel_ip_select_ident(skb, old_iph, &rt->dst);
nf_reset(skb);
vxlan_set_owner(dev, skb);
/* See iptunnel_xmit() */

View File

@ -27,7 +27,7 @@
#define WME_MAX_BA WME_BA_BMP_SIZE
#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
#define ATH_RSSI_DUMMY_MARKER 0x127
#define ATH_RSSI_DUMMY_MARKER 127
#define ATH_RSSI_LPF_LEN 10
#define RSSI_LPF_THRESHOLD -20
#define ATH_RSSI_EP_MULTIPLIER (1<<7)

View File

@ -22,6 +22,7 @@
#include <linux/firmware.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <net/mac80211.h>

View File

@ -1067,15 +1067,19 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
last_rssi = priv->rx.last_rssi;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
ATH_RSSI_EP_MULTIPLIER);
if (ieee80211_is_beacon(hdr->frame_control) &&
!is_zero_ether_addr(common->curbssid) &&
ether_addr_equal(hdr->addr3, common->curbssid)) {
s8 rssi = rxbuf->rxstatus.rs_rssi;
if (rxbuf->rxstatus.rs_rssi < 0)
rxbuf->rxstatus.rs_rssi = 0;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
if (ieee80211_is_beacon(fc))
priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
if (rssi < 0)
rssi = 0;
priv->ah->stats.avgbrssi = rssi;
}
rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
rx_status->band = hw->conf.channel->band;
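Note: the reworked path computes the EP-rounded value into a local s8 rssi, clamps negatives to zero after the rounding rather than before, and only feeds avgbrssi from beacons whose addr3 matches the current BSSID, so beacons from foreign APs no longer skew the average beacon RSSI.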

View File

@ -1463,7 +1463,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
reset_type = ATH9K_RESET_POWER_ON;
else
reset_type = ATH9K_RESET_COLD;
}
} else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) ||
(REG_READ(ah, AR_CR) & AR_CR_RXE))
reset_type = ATH9K_RESET_COLD;
if (!ath9k_hw_set_reset_reg(ah, reset_type))
return false;

View File

@ -825,6 +825,11 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
sdio_release_host(func);
/* Set fw_ready before queuing any commands so that
* lbs_thread won't block from sending them to firmware.
*/
priv->fw_ready = 1;
/*
* FUNC_INIT is required for SD8688 WLAN/BT multiple functions
*/
@ -839,7 +844,6 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n");
}
priv->fw_ready = 1;
wake_up(&card->pwron_waitq);
if (!card->started) {

View File

@ -1117,10 +1117,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
adhoc_join->bss_descriptor.bssid,
adhoc_join->bss_descriptor.ssid);
for (i = 0; bss_desc->supported_rates[i] &&
i < MWIFIEX_SUPPORTED_RATES;
i++)
;
for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
bss_desc->supported_rates[i]; i++)
;
rates_size = i;
/* Copy Data Rates from the Rates recorded in scan response */
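Note: the rewritten loop condition bounds-checks before it dereferences, so the short-circuit && guarantees supported_rates[i] is never read at i == MWIFIEX_SUPPORTED_RATES; the old operand order could read one element past the array when every slot was non-zero. A minimal standalone illustration of the safe shape:

#include <stddef.h>

/* Count entries of a zero-terminated, fixed-capacity rate table without
 * reading past its end: test the bound first, dereference second.
 */
static size_t count_rates(const unsigned char *rates, size_t cap)
{
	size_t i;

	for (i = 0; i < cap && rates[i]; i++)
		;
	return i;
}

int main(void)
{
	/* every slot non-zero: the unsafe operand order would touch rates[4] */
	const unsigned char rates[4] = { 2, 4, 11, 22 };

	return count_rates(rates, sizeof(rates)) == 4 ? 0 : 1;
}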

View File

@ -302,7 +302,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
i++;
usleep_range(10, 20);
/* 50ms max wait */
if (i == 50000)
if (i == 5000)
break;
}
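Note: the arithmetic behind the fix: with usleep_range(10, 20) per iteration, 5000 iterations bound the wait to roughly 5000 x 10 us = 50 ms, matching the "50ms max wait" comment; the old limit of 50000 allowed about half a second.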

View File

@ -55,10 +55,10 @@ config RT61PCI
config RT2800PCI
tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
depends on PCI || RALINK_RT288X || RALINK_RT305X
depends on PCI || SOC_RT288X || SOC_RT305X
select RT2800_LIB
select RT2X00_LIB_PCI if PCI
select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X
select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
select RT2X00_LIB_FIRMWARE
select RT2X00_LIB_CRYPTO
select CRC_CCITT

View File

@ -89,7 +89,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
}
#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
@ -107,7 +107,7 @@ static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
return -ENOMEM;
}
#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
#ifdef CONFIG_PCI
static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@ -1177,7 +1177,7 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
#endif /* CONFIG_PCI */
MODULE_LICENSE("GPL");
#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
static int rt2800soc_probe(struct platform_device *pdev)
{
return rt2x00soc_probe(pdev, &rt2800pci_ops);
@ -1194,7 +1194,7 @@ static struct platform_driver rt2800soc_driver = {
.suspend = rt2x00soc_suspend,
.resume = rt2x00soc_resume,
};
#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
#ifdef CONFIG_PCI
static int rt2800pci_probe(struct pci_dev *pci_dev,
@ -1217,7 +1217,7 @@ static int __init rt2800pci_init(void)
{
int ret = 0;
#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
ret = platform_driver_register(&rt2800soc_driver);
if (ret)
return ret;
@ -1225,7 +1225,7 @@ static int __init rt2800pci_init(void)
#ifdef CONFIG_PCI
ret = pci_register_driver(&rt2800pci_driver);
if (ret) {
#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
platform_driver_unregister(&rt2800soc_driver);
#endif
return ret;
@ -1240,7 +1240,7 @@ static void __exit rt2800pci_exit(void)
#ifdef CONFIG_PCI
pci_unregister_driver(&rt2800pci_driver);
#endif
#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
platform_driver_unregister(&rt2800soc_driver);
#endif
}

View File

@ -1236,8 +1236,10 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
*/
if_limit = &rt2x00dev->if_limits_ap;
if_limit->max = rt2x00dev->ops->max_ap_intf;
if_limit->types = BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT);
if_limit->types = BIT(NL80211_IFTYPE_AP);
#ifdef CONFIG_MAC80211_MESH
if_limit->types |= BIT(NL80211_IFTYPE_MESH_POINT);
#endif
/*
* Build up AP interface combinations structure.
@ -1309,7 +1311,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
rt2x00dev->hw->wiphy->interface_modes |=
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_WDS);
rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

View File

@ -1377,74 +1377,57 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw)
void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
/* dummy routine needed for callback from rtl_op_configure_filter() */
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
if (check_bssid) {
u8 tmp;
if (IS_NORMAL_CHIP(rtlhal->version)) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
tmp = BIT(4);
} else {
reg_rcr |= RCR_CBSSID;
tmp = BIT(4) | BIT(5);
}
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
(u8 *) (&reg_rcr));
_rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp);
} else {
u8 tmp;
if (IS_NORMAL_CHIP(rtlhal->version)) {
reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
tmp = BIT(4);
} else {
reg_rcr &= ~RCR_CBSSID;
tmp = BIT(4) | BIT(5);
}
reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_RCR, (u8 *) (&reg_rcr));
_rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0);
}
}
/*========================================================================== */
static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
enum nl80211_iftype type)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u8 filterout_non_associated_bssid = false;
switch (type) {
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_STATION:
filterout_non_associated_bssid = true;
break;
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_AP:
default:
break;
}
if (filterout_non_associated_bssid) {
if (IS_NORMAL_CHIP(rtlhal->version)) {
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_RCR, (u8 *)(&reg_rcr));
/* enable update TSF */
_rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
break;
case IO_CMD_PAUSE_DM_BY_SCAN:
reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_RCR, (u8 *)(&reg_rcr));
/* disable update TSF */
_rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
break;
}
} else {
reg_rcr |= (RCR_CBSSID);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
(u8 *)(&reg_rcr));
_rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
}
} else if (filterout_non_associated_bssid == false) {
if (IS_NORMAL_CHIP(rtlhal->version)) {
reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
(u8 *)(&reg_rcr));
_rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
} else {
reg_rcr &= (~RCR_CBSSID);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
(u8 *)(&reg_rcr));
_rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
}
}
}
int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (_rtl92cu_set_media_status(hw, type))
return -EOPNOTSUPP;
_rtl92cu_set_check_bssid(hw, type);
if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
if (type != NL80211_IFTYPE_AP)
rtl92cu_set_check_bssid(hw, true);
} else {
rtl92cu_set_check_bssid(hw, false);
}
return 0;
}
@ -2058,8 +2041,6 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
(shortgi_rate << 4) | (shortgi_rate);
}
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
rtl_read_dword(rtlpriv, REG_ARFR0));
}
void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)

View File

@ -881,17 +881,12 @@ static struct vio_driver hvcs_vio_driver = {
/* Only called from hvcs_get_pi please */
static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
{
int clclength;
hvcsd->p_unit_address = pi->unit_address;
hvcsd->p_partition_ID = pi->partition_ID;
clclength = strlen(&pi->location_code[0]);
if (clclength > HVCS_CLC_LENGTH)
clclength = HVCS_CLC_LENGTH;
/* copy the null-term char too */
strncpy(&hvcsd->p_location_code[0],
&pi->location_code[0], clclength + 1);
strlcpy(&hvcsd->p_location_code[0],
&pi->location_code[0], sizeof(hvcsd->p_location_code));
}
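Note: strlcpy() bounds the copy to the destination size and always NUL-terminates, which removes the manual clamp plus "copy the null-term char too" dance; strncpy(), by contrast, leaves the buffer unterminated when the source fills it. A userspace contrast (a sketch; strlcpy() is a BSD/kernel API, so the portable spelling via snprintf() is shown):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char dst[8];
	const char *src = "0123456789"; /* longer than dst */

	/* strncpy() truncates without terminating: dst has no '\0' here */
	strncpy(dst, src, sizeof(dst));

	/* bounded and always terminated, like strlcpy(dst, src, sizeof(dst)) */
	snprintf(dst, sizeof(dst), "%s", src);
	printf("%s\n", dst); /* prints "0123456" */
	return 0;
}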
/*

View File

@ -118,10 +118,8 @@
#ifdef CONFIG_PREEMPT_COUNT
# define preemptible() (preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible() 0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)

View File

@ -854,6 +854,8 @@ type_pf_tresize(struct ip_set *set, bool retried)
retry:
ret = 0;
htable_bits++;
pr_debug("attempt to resize set %s from %u to %u, t %p\n",
set->name, orig->htable_bits, htable_bits, orig);
if (!htable_bits) {
/* In case we have plenty of memory :-) */
pr_warning("Cannot increase the hashsize of set %s further\n",
@ -873,7 +875,7 @@ retry:
data = ahash_tdata(n, j);
m = hbucket(t, HKEY(data, h->initval, htable_bits));
ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0,
type_pf_data_timeout(data));
ip_set_timeout_get(type_pf_data_timeout(data)));
if (ret < 0) {
read_unlock_bh(&set->lock);
ahash_destroy(t);

View File

@ -24,6 +24,9 @@ struct smpboot_thread_data;
* parked (cpu offline)
* @unpark: Optional unpark function, called when the thread is
* unparked (cpu online)
* @pre_unpark: Optional unpark function, called before the thread is
* unparked (cpu online). This is not guaranteed to be
* called on the target cpu of the thread. Careful!
* @selfparking: Thread is not parked by the park function.
* @thread_comm: The base name of the thread
*/
@ -37,6 +40,7 @@ struct smp_hotplug_thread {
void (*cleanup)(unsigned int cpu, bool online);
void (*park)(unsigned int cpu);
void (*unpark)(unsigned int cpu);
void (*pre_unpark)(unsigned int cpu);
bool selfparking;
const char *thread_comm;
};

View File

@ -1045,6 +1045,10 @@ static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
if (sysctl_tcp_low_latency || !tp->ucopy.task)
return false;
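/* Pure ACKs (no payload beyond the TCP header) arriving on an empty
 * prequeue gain nothing from being queued for the user task; let
 * them take the regular receive path instead.
 */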
if (skb->len <= tcp_hdrlen(skb) &&
skb_queue_len(&tp->ucopy.prequeue) == 0)
return false;
__skb_queue_tail(&tp->ucopy.prequeue, skb);
tp->ucopy.memory += skb->truesize;
if (tp->ucopy.memory > sk->sk_rcvbuf) {

View File

@ -209,6 +209,8 @@ static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cp
{
struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
if (ht->pre_unpark)
ht->pre_unpark(cpu);
kthread_unpark(tsk);
}

View File

@ -323,18 +323,10 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
if (!force_irqthreads) {
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
if (!force_irqthreads)
__do_softirq();
#else
do_softirq();
#endif
} else {
__local_bh_disable((unsigned long)__builtin_return_address(0),
SOFTIRQ_OFFSET);
else
wakeup_softirqd();
__local_bh_enable(SOFTIRQ_OFFSET);
}
}
/*
@ -342,9 +334,15 @@ static inline void invoke_softirq(void)
*/
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
local_irq_disable();
#else
WARN_ON_ONCE(!irqs_disabled());
#endif
account_irq_exit_time(current);
trace_hardirq_exit();
sub_preempt_count(IRQ_EXIT_OFFSET);
sub_preempt_count(HARDIRQ_OFFSET);
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
@ -354,7 +352,6 @@ void irq_exit(void)
tick_nohz_irq_exit();
#endif
rcu_irq_exit();
sched_preempt_enable_no_resched();
}
/*

View File

@ -336,7 +336,7 @@ static struct smp_hotplug_thread cpu_stop_threads = {
.create = cpu_stop_create,
.setup = cpu_stop_unpark,
.park = cpu_stop_park,
.unpark = cpu_stop_unpark,
.pre_unpark = cpu_stop_unpark,
.selfparking = true,
};

View File

@ -118,7 +118,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
return NULL;
}
void caif_flow_cb(struct sk_buff *skb)
static void caif_flow_cb(struct sk_buff *skb)
{
struct caif_device_entry *caifd;
void (*dtor)(struct sk_buff *skb) = NULL;

View File

@ -81,8 +81,8 @@ static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
layr->up->ctrlcmd(layr->up, ctrl, layr->id);
}
struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
u8 braddr[ETH_ALEN])
static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
u8 braddr[ETH_ALEN])
{
struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);

View File

@ -4103,7 +4103,7 @@ static void net_rx_action(struct softirq_action *h)
* Allow this to run for 2 jiffies, which allows
* an average latency of 1.5/HZ.
*/
if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
goto softnet_break;
local_irq_enable();
@ -4780,7 +4780,7 @@ EXPORT_SYMBOL(dev_set_mac_address);
/**
* dev_change_carrier - Change device carrier
* @dev: device
* @new_carries: new value
* @new_carrier: new value
*
* Change device carrier
*/

View File

@ -228,9 +228,11 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_PROT_UNREACH, 0);
}
} else
kfree_skb(skb);
} else {
IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
kfree_skb(skb);
consume_skb(skb);
}
}
}
out:
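Note: kfree_skb() counts as a packet drop (and is visible to drop monitors such as dropwatch), while consume_skb() records normal consumption; since this path has just bumped IPSTATS_MIB_INDELIVERS, consume_skb() is the accurate call. The IPv6 input path further down gets the identical treatment.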

View File

@ -423,7 +423,7 @@ int ip_options_compile(struct net *net,
put_unaligned_be32(midtime, timeptr);
opt->is_changed = 1;
}
} else {
} else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) {
unsigned int overflow = optptr[3]>>4;
if (overflow == 15) {
pp_ptr = optptr + 3;
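Note: per RFC 791 the 4-bit overflow counter records hosts that could not insert a timestamp for lack of room; a prespecified-address slot (IPOPT_TS_PRESPEC) that simply does not name this host is not an overflow, which is the miscount the added guard prevents.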

View File

@ -5485,6 +5485,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
if (tcp_checksum_complete_user(sk, skb))
goto csum_error;
if ((int)skb->truesize > sk->sk_forward_alloc)
goto step5;
/* Predicted packet is in window by definition.
* seq == rcv_nxt and rcv_wup <= rcv_nxt.
* Hence, check seq<=rcv_wup reduces to:
@ -5496,9 +5499,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tcp_rcv_rtt_measure_ts(sk, skb);
if ((int)skb->truesize > sk->sk_forward_alloc)
goto step5;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
/* Bulk data transfer: receiver */

View File

@ -241,9 +241,11 @@ resubmit:
icmpv6_send(skb, ICMPV6_PARAMPROB,
ICMPV6_UNK_NEXTHDR, nhoff);
}
} else
kfree_skb(skb);
} else {
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
kfree_skb(skb);
consume_skb(skb);
}
}
rcu_read_unlock();
return 0;

View File

@ -1915,7 +1915,8 @@ void rt6_purge_dflt_routers(struct net *net)
restart:
read_lock_bh(&table->tb6_lock);
for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
(!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
ip6_del_rt(rt);
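Note: accept_ra == 2 means "accept router advertisements even when forwarding is enabled", so default routes on such interfaces are deliberately exempt from the purge; only RA-learned routes on interfaces that no longer accept RAs are deleted.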

View File

@ -280,7 +280,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
struct tty_port *port = &self->port;
DECLARE_WAITQUEUE(wait, current);
int retval;
int do_clocal = 0, extra_count = 0;
int do_clocal = 0;
unsigned long flags;
IRDA_DEBUG(2, "%s()\n", __func__ );
@ -289,8 +289,15 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
* If non-blocking mode is set, or the port is not enabled,
* then make the check up front and then exit.
*/
if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
/* nonblock mode is set or port is not enabled */
if (test_bit(TTY_IO_ERROR, &tty->flags)) {
port->flags |= ASYNC_NORMAL_ACTIVE;
return 0;
}
if (filp->f_flags & O_NONBLOCK) {
/* nonblock mode is set */
if (tty->termios.c_cflag & CBAUD)
tty_port_raise_dtr_rts(port);
port->flags |= ASYNC_NORMAL_ACTIVE;
IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ );
return 0;
@ -315,18 +322,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
__FILE__, __LINE__, tty->driver->name, port->count);
spin_lock_irqsave(&port->lock, flags);
if (!tty_hung_up_p(filp)) {
extra_count = 1;
if (!tty_hung_up_p(filp))
port->count--;
}
spin_unlock_irqrestore(&port->lock, flags);
port->blocked_open++;
spin_unlock_irqrestore(&port->lock, flags);
while (1) {
if (tty->termios.c_cflag & CBAUD)
tty_port_raise_dtr_rts(port);
current->state = TASK_INTERRUPTIBLE;
set_current_state(TASK_INTERRUPTIBLE);
if (tty_hung_up_p(filp) ||
!test_bit(ASYNCB_INITIALIZED, &port->flags)) {
@ -361,13 +366,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
__set_current_state(TASK_RUNNING);
remove_wait_queue(&port->open_wait, &wait);
if (extra_count) {
/* ++ is not atomic, so this should be protected - Jean II */
spin_lock_irqsave(&port->lock, flags);
spin_lock_irqsave(&port->lock, flags);
if (!tty_hung_up_p(filp))
port->count++;
spin_unlock_irqrestore(&port->lock, flags);
}
port->blocked_open--;
spin_unlock_irqrestore(&port->lock, flags);
IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
__FILE__, __LINE__, tty->driver->name, port->count);

View File

@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
/* case CS_ISO_8859_9: */
/* case CS_UNICODE: */
default:
IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
__func__, ias_charset_types[charset]);
IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
__func__, charset,
charset < ARRAY_SIZE(ias_charset_types) ?
ias_charset_types[charset] :
"(unknown)");
/* Aborting, close connection! */
iriap_disconnect_request(self);

View File

@ -355,6 +355,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
l2tp_xmit_skb(session, skb, session->hdr_len);
sock_put(ps->tunnel_sock);
sock_put(sk);
return error;

View File

@ -1470,7 +1470,8 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
if (ret == -EAGAIN)
ret = 1;
return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
return (ret < 0 && ret != -ENOTEMPTY) ? ret :
ret > 0 ? 0 : -IPSET_ERR_EXIST;
}
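Note: decoded, the new expression propagates real errors as-is except -ENOTEMPTY, maps a positive result (element found) to 0, and everything else to -IPSET_ERR_EXIST; previously -ENOTEMPTY escaped to the caller as an error instead of reporting non-existence.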
/* Get headed data of a set */

View File

@ -68,7 +68,8 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
}
}
static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen,
int err)
{
struct sock *sk;
struct hlist_node *tmp;
@ -100,7 +101,10 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
nfc_llcp_accept_unlink(accept_sk);
if (err)
accept_sk->sk_err = err;
accept_sk->sk_state = LLCP_CLOSED;
accept_sk->sk_state_change(sk);
bh_unlock_sock(accept_sk);
@ -123,7 +127,10 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
continue;
}
if (err)
sk->sk_err = err;
sk->sk_state = LLCP_CLOSED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
@ -133,6 +140,36 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
}
write_unlock(&local->sockets.lock);
/*
* If we want to keep the listening sockets alive,
* we don't touch the RAW ones.
*/
if (listen == true)
return;
write_lock(&local->raw_sockets.lock);
sk_for_each_safe(sk, tmp, &local->raw_sockets.head) {
llcp_sock = nfc_llcp_sock(sk);
bh_lock_sock(sk);
nfc_llcp_socket_purge(llcp_sock);
if (err)
sk->sk_err = err;
sk->sk_state = LLCP_CLOSED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
sock_orphan(sk);
sk_del_node_init(sk);
}
write_unlock(&local->raw_sockets.lock);
}
struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
@ -142,14 +179,9 @@ struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
return local;
}
static void local_release(struct kref *ref)
static void local_cleanup(struct nfc_llcp_local *local, bool listen)
{
struct nfc_llcp_local *local;
local = container_of(ref, struct nfc_llcp_local, ref);
list_del(&local->list);
nfc_llcp_socket_release(local, false);
nfc_llcp_socket_release(local, listen, ENXIO);
del_timer_sync(&local->link_timer);
skb_queue_purge(&local->tx_queue);
cancel_work_sync(&local->tx_work);
@ -159,6 +191,16 @@ static void local_release(struct kref *ref)
del_timer_sync(&local->sdreq_timer);
cancel_work_sync(&local->sdreq_timeout_work);
nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
}
static void local_release(struct kref *ref)
{
struct nfc_llcp_local *local;
local = container_of(ref, struct nfc_llcp_local, ref);
list_del(&local->list);
local_cleanup(local, false);
kfree(local);
}
@ -1433,7 +1475,7 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
return;
/* Close and purge all existing sockets */
nfc_llcp_socket_release(local, true);
nfc_llcp_socket_release(local, true, 0);
}
void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
@ -1519,6 +1561,8 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev)
return;
}
local_cleanup(local, false);
nfc_llcp_local_put(local);
}

View File

@ -396,6 +396,8 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
pr_debug("Returning sk state %d\n", sk->sk_state);
sk_acceptq_removed(parent);
return sk;
}

View File

@ -82,10 +82,7 @@ static void rds_message_purge(struct rds_message *rm)
void rds_message_put(struct rds_message *rm)
{
rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
if (atomic_read(&rm->m_refcount) == 0) {
printk(KERN_CRIT "danger refcount zero on %p\n", rm);
WARN_ON(1);
}
WARN(!atomic_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
if (atomic_dec_and_test(&rm->m_refcount)) {
BUG_ON(!list_empty(&rm->m_sock_item));
BUG_ON(!list_empty(&rm->m_conn_item));
@ -197,6 +194,9 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
struct rds_message *rm;
if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
return NULL;
rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
if (!rm)
goto out;
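Note: the added guard rejects, before any addition happens, requests larger than kzalloc() could ever satisfy, and writing the check as a subtraction on the constant side (KMALLOC_MAX_SIZE - sizeof(struct rds_message)) means the comparison itself can never wrap for a huge extra_len.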

View File

@ -298,6 +298,10 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
new_num_classes == q->max_agg_classes - 1) /* agg no more full */
hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
/* The next assignment may let
* agg->initial_budget > agg->budgetmax
* hold, we will take it into account in charge_actual_service().
*/
agg->budgetmax = new_num_classes * agg->lmax;
new_agg_weight = agg->class_weight * new_num_classes;
agg->inv_w = ONE_FP/new_agg_weight;
@ -817,7 +821,7 @@ static void qfq_make_eligible(struct qfq_sched *q)
unsigned long old_vslot = q->oldV >> q->min_slot_shift;
if (vslot != old_vslot) {
unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1;
qfq_move_groups(q, mask, IR, ER);
qfq_move_groups(q, mask, IB, EB);
}
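Note: fls() may return BITS_PER_LONG here, and on a 32-bit build 1UL << 32 is undefined behaviour; promoting the constant to 1ULL keeps the shift well defined, and (1ULL << 32) - 1 truncates back to the intended all-ones unsigned long mask.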
@ -988,12 +992,23 @@ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
/* Update F according to the actual service received by the aggregate. */
static inline void charge_actual_service(struct qfq_aggregate *agg)
{
/* compute the service received by the aggregate */
u32 service_received = agg->initial_budget - agg->budget;
/* Compute the service received by the aggregate, taking into
* account that, after decreasing the number of classes in
* agg, it may happen that
* agg->initial_budget - agg->budget > agg->budgetmax
*/
u32 service_received = min(agg->budgetmax,
agg->initial_budget - agg->budget);
agg->F = agg->S + (u64)service_received * agg->inv_w;
}
static inline void qfq_update_agg_ts(struct qfq_sched *q,
struct qfq_aggregate *agg,
enum update_reason reason);
static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);
static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
@ -1021,7 +1036,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
in_serv_agg->initial_budget = in_serv_agg->budget =
in_serv_agg->budgetmax;
if (!list_empty(&in_serv_agg->active))
if (!list_empty(&in_serv_agg->active)) {
/*
* Still active: reschedule for
* service. Possible optimization: if no other
@ -1032,8 +1047,9 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
* handle it, we would need to maintain an
* extra num_active_aggs field.
*/
qfq_activate_agg(q, in_serv_agg, requeue);
else if (sch->q.qlen == 0) { /* no aggregate to serve */
qfq_update_agg_ts(q, in_serv_agg, requeue);
qfq_schedule_agg(q, in_serv_agg);
} else if (sch->q.qlen == 0) { /* no aggregate to serve */
q->in_serv_agg = NULL;
return NULL;
}
@ -1052,7 +1068,15 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
qdisc_bstats_update(sch, skb);
agg_dequeue(in_serv_agg, cl, len);
in_serv_agg->budget -= len;
/* If lmax is lowered through qfq_change_class for a class owning
* pending packets larger than the new value of lmax, then the
* following condition may hold.
*/
if (unlikely(in_serv_agg->budget < len))
in_serv_agg->budget = 0;
else
in_serv_agg->budget -= len;
q->V += (u64)len * IWSUM;
pr_debug("qfq dequeue: len %u F %lld now %lld\n",
len, (unsigned long long) in_serv_agg->F,
@ -1217,17 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl->deficit = agg->lmax;
list_add_tail(&cl->alist, &agg->active);
if (list_first_entry(&agg->active, struct qfq_class, alist) != cl)
return err; /* aggregate was not empty, nothing else to do */
if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
q->in_serv_agg == agg)
return err; /* non-empty or in service, nothing else to do */
/* recharge budget */
agg->initial_budget = agg->budget = agg->budgetmax;
qfq_update_agg_ts(q, agg, enqueue);
if (q->in_serv_agg == NULL)
q->in_serv_agg = agg;
else if (agg != q->in_serv_agg)
qfq_schedule_agg(q, agg);
qfq_activate_agg(q, agg, enqueue);
return err;
}
@ -1261,7 +1279,8 @@ static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
/* group was surely ineligible, remove */
__clear_bit(grp->index, &q->bitmaps[IR]);
__clear_bit(grp->index, &q->bitmaps[IB]);
} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
q->in_serv_agg == NULL)
q->V = roundedS;
grp->S = roundedS;
@ -1284,8 +1303,15 @@ skip_update:
static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
enum update_reason reason)
{
agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */
qfq_update_agg_ts(q, agg, reason);
qfq_schedule_agg(q, agg);
if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */
q->in_serv_agg = agg; /* start serving this aggregate */
/* update V: to be in service, agg must be eligible */
q->oldV = q->V = agg->S;
} else if (agg != q->in_serv_agg)
qfq_schedule_agg(q, agg);
}
static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
@ -1357,8 +1383,6 @@ static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
__set_bit(grp->index, &q->bitmaps[s]);
}
}
qfq_update_eligible(q);
}
static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)

View File

@ -155,7 +155,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* SCTP-AUTH extensions*/
INIT_LIST_HEAD(&ep->endpoint_shared_keys);
null_key = sctp_auth_shkey_create(0, GFP_KERNEL);
null_key = sctp_auth_shkey_create(0, gfp);
if (!null_key)
goto nomem;

View File

@ -5653,6 +5653,9 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
if (len < sizeof(sctp_assoc_t))
return -EINVAL;
/* Allow the struct to grow and fill in as much as possible */
len = min_t(size_t, len, sizeof(sas));
if (copy_from_user(&sas, optval, len))
return -EFAULT;
@ -5686,9 +5689,6 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
/* Mark beginning of a new observation period */
asoc->stats.max_obs_rto = asoc->rto_min;
/* Allow the struct to grow and fill in as much as possible */
len = min_t(size_t, len, sizeof(sas));
if (put_user(len, optlen))
return -EFAULT;
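Note: moving the min_t() clamp ahead of copy_from_user() (first hunk) is the substance of the fix: with the clamp applied only afterwards, as in the lines removed here, a user-supplied len larger than sizeof(sas) would let copy_from_user() overrun the on-stack sas structure.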

View File

@ -41,8 +41,6 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#define MAX_KMALLOC_SIZE 131072
static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
__u16 out);
@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
int size;
size = sctp_ssnmap_size(in, out);
if (size <= MAX_KMALLOC_SIZE)
if (size <= KMALLOC_MAX_SIZE)
retval = kmalloc(size, gfp);
else
retval = (struct sctp_ssnmap *)
@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
return retval;
fail_map:
if (size <= MAX_KMALLOC_SIZE)
if (size <= KMALLOC_MAX_SIZE)
kfree(retval);
else
free_pages((unsigned long)retval, get_order(size));
@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map)
int size;
size = sctp_ssnmap_size(map->in.len, map->out.len);
if (size <= MAX_KMALLOC_SIZE)
if (size <= KMALLOC_MAX_SIZE)
kfree(map);
else
free_pages((unsigned long)map, get_order(size));

View File

@ -51,7 +51,7 @@
static void sctp_tsnmap_update(struct sctp_tsnmap *map);
static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
__u16 len, __u16 *start, __u16 *end);
static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap);
static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);
/* Initialize a block of memory as a tsnmap. */
struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
gap = tsn - map->base_tsn;
if (gap >= map->len && !sctp_tsnmap_grow(map, gap))
if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
return -ENOMEM;
if (!sctp_tsnmap_has_gap(map) && gap == 0) {
@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
return ngaps;
}
static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap)
static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
{
unsigned long *new;
unsigned long inc;
u16 len;
if (gap >= SCTP_TSN_MAP_SIZE)
if (size > SCTP_TSN_MAP_SIZE)
return 0;
inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);
new = kzalloc(len>>3, GFP_ATOMIC);
if (!new)
return 0;
bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn);
bitmap_copy(new, map->tsn_map,
map->max_tsn_seen - map->cumulative_tsn_ack_point);
kfree(map->tsn_map);
map->tsn_map = new;
map->len = len;
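Note: the gap to size rename tracks the actual fix: a TSN at offset gap needs gap + 1 map entries, so the caller now grows to gap + 1 and the capacity check becomes size > SCTP_TSN_MAP_SIZE; the bitmap_copy() length is likewise rebased on cumulative_tsn_ack_point, the map's true origin after an update.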

Some files were not shown because too many files have changed in this diff