Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	net/ipv4/inet_diag.c

commit 6dec4ac4ee
@@ -4,7 +4,7 @@
 
 menuconfig ARCNET
         depends on NETDEVICES && (ISA || PCI || PCMCIA)
-        bool "ARCnet support"
+        tristate "ARCnet support"
         ---help---
           If you have a network card of this type, say Y and check out the
           (arguably) beautiful poetry in
@@ -20,7 +20,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
@@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp)
                          skb->len,
                          DMA_TO_DEVICE);
                 rp->skb = NULL;
-                dev_kfree_skb(skb);
+                dev_kfree_skb_irq(skb);
         }
 
         bp->tx_cons = cons;
@@ -10327,6 +10327,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
         return 0;
 }
 
+
+static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
+                                     struct link_params *params, u8 mode)
+{
+        struct bnx2x *bp = params->bp;
+        u16 temp;
+
+        bnx2x_cl22_write(bp, phy,
+                MDIO_REG_GPHY_SHADOW,
+                MDIO_REG_GPHY_SHADOW_LED_SEL1);
+        bnx2x_cl22_read(bp, phy,
+                MDIO_REG_GPHY_SHADOW,
+                &temp);
+        temp &= 0xff00;
+
+        DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
+        switch (mode) {
+        case LED_MODE_FRONT_PANEL_OFF:
+        case LED_MODE_OFF:
+                temp |= 0x00ee;
+                break;
+        case LED_MODE_OPER:
+                temp |= 0x0001;
+                break;
+        case LED_MODE_ON:
+                temp |= 0x00ff;
+                break;
+        default:
+                break;
+        }
+        bnx2x_cl22_write(bp, phy,
+                MDIO_REG_GPHY_SHADOW,
+                MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+        return;
+}
+
+
 static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
                                      struct link_params *params)
 {
@@ -11103,7 +11140,7 @@ static struct bnx2x_phy phy_54618se = {
         .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
         .format_fw_ver = (format_fw_ver_t)NULL,
         .hw_reset = (hw_reset_t)NULL,
-        .set_link_led = (set_link_led_t)NULL,
+        .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
         .phy_specific_func = (phy_specific_func_t)NULL
 };
 /*****************************************************************/
@@ -6990,6 +6990,7 @@ The other bits are reserved and should be zero*/
 #define MDIO_REG_INTR_MASK                      0x1b
 #define MDIO_REG_INTR_MASK_LINK_STATUS          (0x1 << 1)
 #define MDIO_REG_GPHY_SHADOW                    0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1           (0x0d << 10)
 #define MDIO_REG_GPHY_SHADOW_LED_SEL2           (0x0e << 10)
 #define MDIO_REG_GPHY_SHADOW_WR_ENA             (0x1 << 15)
 #define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED       (0x1e << 10)
@@ -61,9 +61,9 @@
 #ifdef EHEA_SMALL_QUEUES
 #define EHEA_MAX_CQE_COUNT      1023
 #define EHEA_DEF_ENTRIES_SQ     1023
-#define EHEA_DEF_ENTRIES_RQ1    4095
+#define EHEA_DEF_ENTRIES_RQ1    1023
 #define EHEA_DEF_ENTRIES_RQ2    1023
-#define EHEA_DEF_ENTRIES_RQ3    1023
+#define EHEA_DEF_ENTRIES_RQ3    511
 #else
 #define EHEA_MAX_CQE_COUNT      4080
 #define EHEA_DEF_ENTRIES_SQ     4080
@@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work)
 out_herr:
         free_page((unsigned long)cb2);
 resched:
-        schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+        schedule_delayed_work(&port->stats_work,
+                              round_jiffies_relative(msecs_to_jiffies(1000)));
 }
 
 static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -2434,7 +2435,8 @@ static int ehea_open(struct net_device *dev)
         }
 
         mutex_unlock(&port->port_lock);
-        schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+        schedule_delayed_work(&port->stats_work,
+                              round_jiffies_relative(msecs_to_jiffies(1000)));
 
         return ret;
 }
@@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
 
                 /* FIXME: do we need this? */
                 memset(local_list, 0, sizeof(local_list));
-                memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG));
+                memset(remote_list, 0, sizeof(remote_list));
 
                 /* a 0 address marks the end of the valid entries */
                 if (senddata->addr[startchunk] == 0)
@@ -58,10 +58,8 @@
 
 
 #define TX_DESC_PER_IOCB 8
-/* The maximum number of frags we handle is based
- * on PAGE_SIZE...
- */
-#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
+
+#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
 #define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
 #else /* all other page sizes */
 #define TX_DESC_PER_OAL 0
@@ -1353,7 +1351,7 @@ struct tx_ring_desc {
         struct ob_mac_iocb_req *queue_entry;
         u32 index;
         struct oal oal;
-        struct map_list map[MAX_SKB_FRAGS + 1];
+        struct map_list map[MAX_SKB_FRAGS + 2];
         int map_cnt;
         struct tx_ring_desc *next;
 };
@@ -781,10 +781,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
 
-        /* Do not manage MMC IRQ (FIXME) */
+        /* Mask MMC irq, counters are managed in SW and registers
+         * are cleared on each READ eventually. */
         dwmac_mmc_intr_all_mask(priv->ioaddr);
-        dwmac_mmc_ctrl(priv->ioaddr, mode);
-        memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+
+        if (priv->dma_cap.rmon) {
+                dwmac_mmc_ctrl(priv->ioaddr, mode);
+                memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+        } else
+                pr_info(" No MAC Management Counters available");
 }
 
 static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
@@ -1012,8 +1017,7 @@ static int stmmac_open(struct net_device *dev)
         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
         priv->xstats.threshold = tc;
 
-        if (priv->dma_cap.rmon)
-                stmmac_mmc_setup(priv);
+        stmmac_mmc_setup(priv);
 
         /* Start the ball rolling... */
         DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
@@ -3,7 +3,7 @@
 #
 
 menuconfig PHYLIB
-        bool "PHY Device support and infrastructure"
+        tristate "PHY Device support and infrastructure"
         depends on !S390
         depends on NETDEVICES
         help
@@ -588,8 +588,6 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
 
         WARN_ON(priv->fw_state != FW_STATE_READY);
 
-        cancel_work_sync(&priv->work);
-
         p54spi_power_off(priv);
         spin_lock_irqsave(&priv->tx_lock, flags);
         INIT_LIST_HEAD(&priv->tx_pending);
@@ -597,6 +595,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
 
         priv->fw_state = FW_STATE_OFF;
         mutex_unlock(&priv->mutex);
+
+        cancel_work_sync(&priv->work);
 }
 
 static int __devinit p54spi_probe(struct spi_device *spi)
@@ -656,6 +656,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
         init_completion(&priv->fw_comp);
         INIT_LIST_HEAD(&priv->tx_pending);
         mutex_init(&priv->mutex);
+        spin_lock_init(&priv->tx_lock);
         SET_IEEE80211_DEV(hw, &spi->dev);
         priv->common.open = p54spi_op_start;
         priv->common.stop = p54spi_op_stop;
@@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
                 dwrq->flags = 0;
                 dwrq->length = 0;
         }
-        essid->octets[essid->length] = '\0';
+        essid->octets[dwrq->length] = '\0';
         memcpy(extra, essid->octets, dwrq->length);
         kfree(essid);
 
@@ -3771,7 +3771,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
         /* Apparently the data is read from end to start */
         rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
         /* The returned value is in CPU order, but eeprom is le */
-        rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+        *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
         rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
         *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
         rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
@@ -1021,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                 pending_idx = *((u16 *)skb->data);
                 xen_netbk_idx_release(netbk, pending_idx);
                 for (j = start; j < i; j++) {
-                        pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
                         xen_netbk_idx_release(netbk, pending_idx);
                 }
 
@@ -30,7 +30,7 @@
  */
 
 struct tc_stats {
-        __u64   bytes;          /* NUmber of enqueues bytes */
+        __u64   bytes;          /* Number of enqueued bytes */
         __u32   packets;        /* Number of enqueued packets */
         __u32   drops;          /* Packets dropped because of lack of resources */
         __u32   overlimits;     /* Number of throttle events when this
@@ -297,7 +297,7 @@ struct tc_htb_glob {
         __u32 debug;            /* debug flags */
 
         /* stats */
-        __u32 direct_pkts; /* count of non shapped packets */
+        __u32 direct_pkts; /* count of non shaped packets */
 };
 enum {
         TCA_HTB_UNSPEC,
@@ -503,7 +503,7 @@ enum {
 };
 #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
 
-/* State transition probablities for 4 state model */
+/* State transition probabilities for 4 state model */
 struct tc_netem_gimodel {
         __u32   p13;
         __u32   p31;
@@ -205,12 +205,7 @@ dst_feature(const struct dst_entry *dst, u32 feature)
 
 static inline u32 dst_mtu(const struct dst_entry *dst)
 {
-        u32 mtu = dst_metric_raw(dst, RTAX_MTU);
-
-        if (!mtu)
-                mtu = dst->ops->default_mtu(dst);
-
-        return mtu;
+        return dst->ops->mtu(dst);
 }
 
 /* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
@@ -17,7 +17,7 @@ struct dst_ops {
         int                     (*gc)(struct dst_ops *ops);
         struct dst_entry *      (*check)(struct dst_entry *, __u32 cookie);
         unsigned int            (*default_advmss)(const struct dst_entry *);
-        unsigned int            (*default_mtu)(const struct dst_entry *);
+        unsigned int            (*mtu)(const struct dst_entry *);
         u32 *                   (*cow_metrics)(struct dst_entry *, unsigned long);
         void                    (*destroy)(struct dst_entry *);
         void                    (*ifdown)(struct dst_entry *,
@@ -31,6 +31,7 @@
 /** struct ip_options - IP Options
  *
  * @faddr - Saved first hop address
+ * @nexthop - Saved nexthop address in LSRR and SSRR
  * @is_data - Options in __data, rather than skb
  * @is_strictroute - Strict source route
  * @srr_is_hit - Packet destination addr was our one
@@ -41,6 +42,7 @@
  */
 struct ip_options {
         __be32          faddr;
+        __be32          nexthop;
         unsigned char   optlen;
         unsigned char   srr;
         unsigned char   rr;
@@ -71,12 +71,12 @@ struct rtable {
         struct fib_info         *fi; /* for client ref to shared metrics */
 };
 
-static inline bool rt_is_input_route(struct rtable *rt)
+static inline bool rt_is_input_route(const struct rtable *rt)
 {
         return rt->rt_route_iif != 0;
 }
 
-static inline bool rt_is_output_route(struct rtable *rt)
+static inline bool rt_is_output_route(const struct rtable *rt)
 {
         return rt->rt_route_iif == 0;
 }
@@ -136,20 +136,21 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
 
 static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
-        int tmp;
         u16 chks;
         u16 len;
+        __le16 data;
+
         struct cffrml *this = container_obj(layr);
         if (this->dofcs) {
                 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
-                tmp = cpu_to_le16(chks);
-                cfpkt_add_trail(pkt, &tmp, 2);
+                data = cpu_to_le16(chks);
+                cfpkt_add_trail(pkt, &data, 2);
         } else {
                 cfpkt_pad_trail(pkt, 2);
         }
         len = cfpkt_getlen(pkt);
-        tmp = cpu_to_le16(len);
-        cfpkt_add_head(pkt, &tmp, 2);
+        data = cpu_to_le16(len);
+        cfpkt_add_head(pkt, &data, 2);
         cfpkt_info(pkt)->hdr_len += 2;
         if (cfpkt_erroneous(pkt)) {
                 pr_err("Packet is erroneous!\n");
@@ -2414,7 +2414,10 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
         struct net *net = seq_file_net(seq);
         struct neigh_table *tbl = state->tbl;
 
-        pn = pn->next;
+        do {
+                pn = pn->next;
+        } while (pn && !net_eq(pneigh_net(pn), net));
+
         while (!pn) {
                 if (++state->bucket > PNEIGH_HASHMASK)
                         break;
@@ -2279,7 +2279,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb)
  * @shiftlen: shift up to this many bytes
  *
  * Attempts to shift up to shiftlen worth of bytes, which may be less than
- * the length of the skb, from tgt to skb. Returns number bytes shifted.
+ * the length of the skb, from skb to tgt. Returns number bytes shifted.
  * It's up to caller to free skb if everything was shifted.
 *
  * If @tgt runs out of frags, the whole operation is aborted.
@@ -111,6 +111,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
         rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                                inet->inet_sport, inet->inet_dport, sk);
         if (IS_ERR(rt)) {
                 err = PTR_ERR(rt);
+                rt = NULL;
                 goto failure;
         }
@@ -112,7 +112,7 @@ static unsigned long dn_rt_deadline;
 static int dn_dst_gc(struct dst_ops *ops);
 static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static unsigned int dn_dst_mtu(const struct dst_entry *dst);
 static void dn_dst_destroy(struct dst_entry *);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
@@ -135,7 +135,7 @@ static struct dst_ops dn_dst_ops = {
         .gc =                   dn_dst_gc,
         .check =                dn_dst_check,
         .default_advmss =       dn_dst_default_advmss,
-        .default_mtu =          dn_dst_default_mtu,
+        .mtu =                  dn_dst_mtu,
         .cow_metrics =          dst_cow_metrics_generic,
         .destroy =              dn_dst_destroy,
         .negative_advice =      dn_dst_negative_advice,
@@ -825,9 +825,11 @@ static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
         return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
 }
 
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst)
+static unsigned int dn_dst_mtu(const struct dst_entry *dst)
 {
-        return dst->dev->mtu;
+        unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+        return mtu ? : dst->dev->mtu;
 }
 
 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
@@ -1721,7 +1721,8 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
         if (err) {
                 int j;
 
-                pmc->sfcount[sfmode]--;
+                if (!delta)
+                        pmc->sfcount[sfmode]--;
                 for (j=0; j<i; j++)
                         (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
         } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
@@ -108,9 +108,6 @@ static int inet_csk_diag_fill(struct sock *sk,
                                icsk->icsk_ca_ops->name);
         }
 
-        if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
-                RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
-
         r->idiag_family = sk->sk_family;
         r->idiag_state = sk->sk_state;
         r->idiag_timer = 0;
@@ -125,6 +122,12 @@ static int inet_csk_diag_fill(struct sock *sk,
         r->id.idiag_src[0] = inet->inet_rcv_saddr;
         r->id.idiag_dst[0] = inet->inet_daddr;
 
+        /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
+         * hence this needs to be included regardless of socket family.
+         */
+        if (ext & (1 << (INET_DIAG_TOS - 1)))
+                RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
         if (r->idiag_family == AF_INET6) {
                 const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
 
         rt = skb_rtable(skb);
 
-        if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
+        if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
                 goto sr_failed;
 
         if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
@@ -568,12 +568,13 @@ void ip_forward_options(struct sk_buff *skb)
                      ) {
                         if (srrptr + 3 > srrspace)
                                 break;
-                        if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
+                        if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0)
                                 break;
                 }
                 if (srrptr + 3 <= srrspace) {
                         opt->is_changed = 1;
                         ip_rt_get_source(&optptr[srrptr-1], skb, rt);
+                        ip_hdr(skb)->daddr = opt->nexthop;
                         optptr[2] = srrptr+4;
                 } else if (net_ratelimit())
                         printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
@@ -640,7 +641,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
         }
         if (srrptr <= srrspace) {
                 opt->srr_is_hit = 1;
-                iph->daddr = nexthop;
+                opt->nexthop = nexthop;
                 opt->is_changed = 1;
         }
         return 0;
@@ -325,7 +325,6 @@ config IP_NF_TARGET_TTL
 # raw + specific targets
 config IP_NF_RAW
         tristate 'raw table support (required for NOTRACK/TRACE)'
-        depends on NETFILTER_ADVANCED
         help
           This option adds a `raw' table to iptables. This table is the very
           first in the netfilter framework and hooks in at the PREROUTING
|
||||
|
||||
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
|
||||
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
|
||||
static unsigned int ipv4_default_mtu(const struct dst_entry *dst);
|
||||
static unsigned int ipv4_mtu(const struct dst_entry *dst);
|
||||
static void ipv4_dst_destroy(struct dst_entry *dst);
|
||||
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
|
||||
static void ipv4_link_failure(struct sk_buff *skb);
|
||||
@@ -193,7 +193,7 @@ static struct dst_ops ipv4_dst_ops = {
         .gc =                   rt_garbage_collect,
         .check =                ipv4_dst_check,
         .default_advmss =       ipv4_default_advmss,
-        .default_mtu =          ipv4_default_mtu,
+        .mtu =                  ipv4_mtu,
         .cow_metrics =          ipv4_cow_metrics,
         .destroy =              ipv4_dst_destroy,
         .ifdown =               ipv4_dst_ifdown,
@@ -1814,12 +1814,17 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
         return advmss;
 }
 
-static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_mtu(const struct dst_entry *dst)
 {
-        unsigned int mtu = dst->dev->mtu;
+        const struct rtable *rt = (const struct rtable *) dst;
+        unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+        if (mtu && rt_is_output_route(rt))
+                return mtu;
+
+        mtu = dst->dev->mtu;
 
         if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
-                const struct rtable *rt = (const struct rtable *) dst;
 
                 if (rt->rt_gateway != rt->rt_dst && mtu > 576)
                         mtu = 576;
@@ -2755,9 +2760,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
         return NULL;
 }
 
-static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
 {
-        return 0;
+        unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+        return mtu ? : dst->dev->mtu;
 }
 
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -2775,7 +2782,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
         .protocol               =       cpu_to_be16(ETH_P_IP),
         .destroy                =       ipv4_dst_destroy,
         .check                  =       ipv4_blackhole_dst_check,
-        .default_mtu            =       ipv4_blackhole_default_mtu,
+        .mtu                    =       ipv4_blackhole_mtu,
         .default_advmss         =       ipv4_default_advmss,
         .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
         .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
@@ -85,7 +85,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
  * request_sock (formerly open request) hash tables.
  */
 static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
-                           const u32 rnd, const u16 synq_hsize)
+                           const u32 rnd, const u32 synq_hsize)
 {
         u32 c;
 
@@ -1574,7 +1574,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
         }
         if (!rt->rt6i_peer)
                 rt6_bind_peer(rt, 1);
-        if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+        if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
                 goto release;
 
         if (dev->addr_len) {
@@ -186,7 +186,6 @@ config IP6_NF_MANGLE
 
 config IP6_NF_RAW
         tristate 'raw table support (required for TRACE)'
-        depends on NETFILTER_ADVANCED
         help
           This option adds a `raw' table to ip6tables. This table is the very
           first in the netfilter framework and hooks in at the PREROUTING
@@ -77,7 +77,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
                                     const struct in6_addr *dest);
 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int      ip6_default_advmss(const struct dst_entry *dst);
-static unsigned int      ip6_default_mtu(const struct dst_entry *dst);
+static unsigned int      ip6_mtu(const struct dst_entry *dst);
 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
 static void              ip6_dst_destroy(struct dst_entry *);
 static void              ip6_dst_ifdown(struct dst_entry *,
@@ -144,7 +144,7 @@ static struct dst_ops ip6_dst_ops_template = {
         .gc_thresh              =       1024,
         .check                  =       ip6_dst_check,
         .default_advmss         =       ip6_default_advmss,
-        .default_mtu            =       ip6_default_mtu,
+        .mtu                    =       ip6_mtu,
         .cow_metrics            =       ipv6_cow_metrics,
         .destroy                =       ip6_dst_destroy,
         .ifdown                 =       ip6_dst_ifdown,
@@ -155,9 +155,11 @@ static struct dst_ops ip6_dst_ops_template = {
         .neigh_lookup           =       ip6_neigh_lookup,
 };
 
-static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
 {
-        return 0;
+        unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+        return mtu ? : dst->dev->mtu;
 }
 
 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -175,7 +177,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
         .protocol               =       cpu_to_be16(ETH_P_IPV6),
         .destroy                =       ip6_dst_destroy,
         .check                  =       ip6_dst_check,
-        .default_mtu            =       ip6_blackhole_default_mtu,
+        .mtu                    =       ip6_blackhole_mtu,
         .default_advmss         =       ip6_default_advmss,
         .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
         .cow_metrics            =       ip6_rt_blackhole_cow_metrics,
@@ -1041,10 +1043,15 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
         return mtu;
 }
 
-static unsigned int ip6_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_mtu(const struct dst_entry *dst)
 {
-        unsigned int mtu = IPV6_MIN_MTU;
         struct inet6_dev *idev;
+        unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+        if (mtu)
+                return mtu;
+
+        mtu = IPV6_MIN_MTU;
 
         rcu_read_lock();
         idev = __in6_dev_get(dst->dev);
@@ -1253,6 +1253,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
         if (!want_cookie || tmp_opt.tstamp_ok)
                 TCP_ECN_create_request(req, tcp_hdr(skb));
 
+        treq->iif = sk->sk_bound_dev_if;
+
+        /* So that link locals have meaning */
+        if (!sk->sk_bound_dev_if &&
+            ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
+                treq->iif = inet6_iif(skb);
+
         if (!isn) {
                 struct inet_peer *peer = NULL;
 
@@ -1262,12 +1269,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                         atomic_inc(&skb->users);
                         treq->pktopts = skb;
                 }
-                treq->iif = sk->sk_bound_dev_if;
-
-                /* So that link locals have meaning */
-                if (!sk->sk_bound_dev_if &&
-                    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-                        treq->iif = inet6_iif(skb);
 
                 if (want_cookie) {
                         isn = cookie_v6_init_sequence(sk, skb, &req->mss);
@@ -274,9 +274,9 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
 
         PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
 
-        PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
-                     "3839 bytes");
+        PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
+                     "3839 bytes");
         PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
                      "7935 bytes");
 
         /*
@@ -260,7 +260,7 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
         struct ieee80211_radiotap_header *rthdr;
         unsigned char *pos;
-        __le16 txflags;
+        u16 txflags;
 
         rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);
 
@@ -290,13 +290,13 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
         txflags = 0;
         if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
             !is_multicast_ether_addr(hdr->addr1))
-                txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
+                txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
 
         if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
             (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
-                txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
+                txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
         else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
-                txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
+                txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
 
         put_unaligned_le16(txflags, pos);
         pos += 2;
@@ -542,7 +542,6 @@ config NETFILTER_XT_TARGET_NOTRACK
         tristate '"NOTRACK" target support'
         depends on IP_NF_RAW || IP6_NF_RAW
         depends on NF_CONNTRACK
-        depends on NETFILTER_ADVANCED
         help
           The NOTRACK target allows a select rule to specify
           which packets *not* to enter the conntrack/NAT
@@ -162,8 +162,8 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                 map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
                 map6->list.mask = *mask6;
                 map6->list.valid = 1;
-                ret_val = netlbl_af4list_add(&map4->list,
-                                             &addrmap->list4);
+                ret_val = netlbl_af6list_add(&map6->list,
+                                             &addrmap->list6);
                 if (ret_val != 0)
                         goto cfg_unlbl_map_add_failure;
                 break;
@@ -2037,6 +2037,10 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
         }
 
         request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
+        if (!request_wiphy) {
+                reg_set_request_processed();
+                return -ENODEV;
+        }
 
         if (!last_request->intersect) {
                 int r;
@@ -2382,9 +2382,11 @@ static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
         return dst_metric_advmss(dst->path);
 }
 
-static unsigned int xfrm_default_mtu(const struct dst_entry *dst)
+static unsigned int xfrm_mtu(const struct dst_entry *dst)
 {
-        return dst_mtu(dst->path);
+        unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+        return mtu ? : dst_mtu(dst->path);
 }
 
 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)
@@ -2411,8 +2413,8 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                 dst_ops->check = xfrm_dst_check;
         if (likely(dst_ops->default_advmss == NULL))
                 dst_ops->default_advmss = xfrm_default_advmss;
-        if (likely(dst_ops->default_mtu == NULL))
-                dst_ops->default_mtu = xfrm_default_mtu;
+        if (likely(dst_ops->mtu == NULL))
+                dst_ops->mtu = xfrm_mtu;
         if (likely(dst_ops->negative_advice == NULL))
                 dst_ops->negative_advice = xfrm_negative_advice;
         if (likely(dst_ops->link_failure == NULL))