Commit 56845d78 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/atheros/atlx/atl1.c
	drivers/net/ethernet/atheros/atlx/atl1.h

Resolved a conflict between a DMA error bug fix and NAPI
support changes in the atl1 driver.
Signed-off-by: David S. Miller <davem@davemloft.net>
parents df8ef8f3 8a9a0ea6
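
The atl1 hunks below fold the driver's separate tx_timeout_task and pcie_dma_to_rst_task work items into a single reset_dev_task that both the TX-timeout path and the PCIe DMA-error interrupt path schedule. A minimal sketch of that pattern, using a hypothetical example_adapter in place of the real struct atl1_adapter:

/*
 * Simplified sketch of the consolidated reset work item; names prefixed
 * with example_ are stand-ins, not the actual atl1 symbols.
 */
#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct example_adapter {
	struct net_device *netdev;
	struct work_struct reset_dev_task;	/* replaces tx_timeout_task and pcie_dma_to_rst_task */
};

static void example_reset_dev_task(struct work_struct *work)
{
	struct example_adapter *adapter =
		container_of(work, struct example_adapter, reset_dev_task);

	netif_device_detach(adapter->netdev);
	/* ... bring the hardware down and back up here ... */
	netif_device_attach(adapter->netdev);
}

/* Both error paths now queue the same work item. */
static void example_handle_fatal_error(struct example_adapter *adapter)
{
	schedule_work(&adapter->reset_dev_task);
}

/* Registered once at probe time. */
static void example_probe_init(struct example_adapter *adapter)
{
	INIT_WORK(&adapter->reset_dev_task, example_reset_dev_task);
}
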
@@ -7572,8 +7572,8 @@ F: Documentation/filesystems/xfs.txt
F: fs/xfs/
XILINX AXI ETHERNET DRIVER
M: Ariane Keller <ariane.keller@tik.ee.ethz.ch>
M: Daniel Borkmann <daniel.borkmann@tik.ee.ethz.ch>
M: Anirudha Sarangi <anirudh@xilinx.com>
M: John Linn <John.Linn@xilinx.com>
S: Maintained
F: drivers/net/ethernet/xilinx/xilinx_axienet*
......
@@ -2528,7 +2528,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
"pcie phy link down %x\n", status);
if (netif_running(adapter->netdev)) { /* reset MAC */
atlx_irq_disable(adapter);
schedule_work(&adapter->pcie_dma_to_rst_task);
schedule_work(&adapter->reset_dev_task);
return IRQ_HANDLED;
}
}
@@ -2540,7 +2540,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
"pcie DMA r/w error (status = 0x%x)\n",
status);
atlx_irq_disable(adapter);
schedule_work(&adapter->pcie_dma_to_rst_task);
schedule_work(&adapter->reset_dev_task);
return IRQ_HANDLED;
}
@@ -2681,10 +2681,10 @@ static void atl1_down(struct atl1_adapter *adapter)
atl1_clean_rx_ring(adapter);
}
static void atl1_tx_timeout_task(struct work_struct *work)
static void atl1_reset_dev_task(struct work_struct *work)
{
struct atl1_adapter *adapter =
container_of(work, struct atl1_adapter, tx_timeout_task);
container_of(work, struct atl1_adapter, reset_dev_task);
struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
@@ -3087,12 +3087,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
(unsigned long)adapter);
adapter->phy_timer_pending = false;
INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
err = register_netdev(netdev);
if (err)
goto err_common;
......
@@ -764,8 +764,8 @@ struct atl1_adapter {
spinlock_t lock;
struct napi_struct napi;
struct work_struct tx_timeout_task;
struct work_struct reset_dev_task;
struct work_struct link_chg_task;
struct work_struct pcie_dma_to_rst_task;
struct timer_list phy_config_timer;
bool phy_timer_pending;
......
@@ -201,7 +201,7 @@ static void atlx_tx_timeout(struct net_device *netdev)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
schedule_work(&adapter->tx_timeout_task);
schedule_work(&adapter->reset_dev_task);
}
/*
......
@@ -1310,10 +1310,6 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
oem_reg |= HV_OEM_BITS_LPLU;
/* Set Restart auto-neg to activate the bits */
if (!hw->phy.ops.check_reset_block(hw))
oem_reg |= HV_OEM_BITS_RESTART_AN;
} else {
if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
@@ -1324,6 +1320,11 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
oem_reg |= HV_OEM_BITS_LPLU;
}
/* Set Restart auto-neg to activate the bits */
if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
!hw->phy.ops.check_reset_block(hw))
oem_reg |= HV_OEM_BITS_RESTART_AN;
ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
release:
@@ -3682,7 +3683,11 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
if (hw->mac.type >= e1000_pchlan) {
e1000_oem_bits_config_ich8lan(hw, false);
e1000_phy_hw_reset_ich8lan(hw);
/* Reset PHY to activate OEM bits on 82577/8 */
if (hw->mac.type == e1000_pchlan)
e1000e_phy_hw_reset_generic(hw);
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return;
......
@@ -4865,6 +4865,16 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (wufc) {
ixgbe_set_rx_mode(netdev);
/*
* enable the optics for both mult-speed fiber and
* 82599 SFP+ fiber as we can WoL.
*/
if (hw->mac.ops.enable_tx_laser &&
(hw->phy.multispeed_fiber ||
(hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
hw->mac.type == ixgbe_mac_82599EB)))
hw->mac.ops.enable_tx_laser(hw);
/* turn on all-multi mode if wake on multicast is enabled */
if (wufc & IXGBE_WUFC_MC) {
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
......
@@ -1418,6 +1418,7 @@ static int __devinit ks8851_probe(struct spi_device *spi)
struct net_device *ndev;
struct ks8851_net *ks;
int ret;
unsigned cider;
ndev = alloc_etherdev(sizeof(struct ks8851_net));
if (!ndev)
@@ -1484,8 +1485,8 @@ static int __devinit ks8851_probe(struct spi_device *spi)
ks8851_soft_reset(ks, GRR_GSR);
/* simple check for a valid chip being connected to the bus */
if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
cider = ks8851_rdreg16(ks, KS_CIDER);
if ((cider & ~CIDER_REV_MASK) != CIDER_ID) {
dev_err(&spi->dev, "failed to read device ID\n");
ret = -ENODEV;
goto err_id;
@@ -1516,8 +1517,7 @@ static int __devinit ks8851_probe(struct spi_device *spi)
}
netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
ndev->dev_addr, ndev->irq,
CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq,
ks->rc_ccr & CCR_EEPROM ? "has" : "no");
return 0;
......
@@ -40,7 +40,7 @@
#define DRV_NAME "ks8851_mll"
static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
#define MAX_RECV_FRAMES 32
#define MAX_RECV_FRAMES 255
#define MAX_BUF_SIZE 2048
#define TX_BUF_SIZE 2000
#define RX_BUF_SIZE 2000
......
@@ -961,6 +961,11 @@ static inline void cp_start_hw (struct cp_private *cp)
cpw8(Cmd, RxOn | TxOn);
}
static void cp_enable_irq(struct cp_private *cp)
{
cpw16_f(IntrMask, cp_intr_mask);
}
static void cp_init_hw (struct cp_private *cp)
{
struct net_device *dev = cp->dev;
@@ -1000,8 +1005,6 @@ static void cp_init_hw (struct cp_private *cp)
cpw16(MultiIntr, 0);
cpw16_f(IntrMask, cp_intr_mask);
cpw8_f(Cfg9346, Cfg9346_Lock);
}
@@ -1134,6 +1137,8 @@ static int cp_open (struct net_device *dev)
if (rc)
goto err_out_hw;
cp_enable_irq(cp);
netif_carrier_off(dev);
mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
netif_start_queue(dev);
@@ -2032,6 +2037,7 @@ static int cp_resume (struct pci_dev *pdev)
/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
cp_init_rings_index (cp);
cp_init_hw (cp);
cp_enable_irq(cp);
netif_start_queue (dev);
spin_lock_irqsave (&cp->lock, flags);
......
@@ -1166,10 +1166,8 @@ smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat)
/* Quickly dumps bad packets */
static void
smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
{
unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2;
if (likely(pktwords >= 4)) {
unsigned int timeout = 500;
unsigned int val;
@@ -1233,7 +1231,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
continue;
}
skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN);
skb = netdev_alloc_skb(dev, pktwords << 2);
if (unlikely(!skb)) {
SMSC_WARN(pdata, rx_err,
"Unable to allocate skb for rx packet");
@@ -1243,14 +1241,12 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
break;
}
skb->data = skb->head;
skb_reset_tail_pointer(skb);
pdata->ops->rx_readfifo(pdata,
(unsigned int *)skb->data, pktwords);
/* Align IP on 16B boundary */
skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, pktlength - 4);
pdata->ops->rx_readfifo(pdata,
(unsigned int *)skb->head, pktwords);
skb->protocol = eth_type_trans(skb, dev);
skb_checksum_none_assert(skb);
netif_receive_skb(skb);
@@ -1565,7 +1561,7 @@ static int smsc911x_open(struct net_device *dev)
smsc911x_reg_write(pdata, FIFO_INT, temp);
/* set RX Data offset to 2 bytes for alignment */
smsc911x_reg_write(pdata, RX_CFG, (2 << 8));
smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8));
/* enable NAPI polling before enabling RX interrupts */
napi_enable(&pdata->napi);
......
@@ -2,9 +2,7 @@
* Definitions for Xilinx Axi Ethernet device driver.
*
* Copyright (c) 2009 Secret Lab Technologies, Ltd.
* Copyright (c) 2010 Xilinx, Inc. All rights reserved.
* Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
* Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*/
#ifndef XILINX_AXIENET_H
......
@@ -4,9 +4,9 @@
* Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
* Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
* Copyright (c) 2010 Xilinx, Inc. All rights reserved.
* Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
* Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*
* This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
* and Spartan6.
......
@@ -2,9 +2,9 @@
* MDIO bus driver for the Xilinx Axi Ethernet device
*
* Copyright (c) 2009 Secret Lab Technologies, Ltd.
* Copyright (c) 2010 Xilinx, Inc. All rights reserved.
* Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
* Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*/
#include <linux/of_address.h>
......
@@ -235,7 +235,7 @@ struct ppp_net {
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
static int ppp_xmit_process(struct ppp *ppp);
static void ppp_xmit_process(struct ppp *ppp);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
@@ -969,8 +969,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
put_unaligned_be16(proto, pp);
skb_queue_tail(&ppp->file.xq, skb);
if (!ppp_xmit_process(ppp))
netif_stop_queue(dev);
ppp_xmit_process(ppp);
return NETDEV_TX_OK;
outf:
@@ -1048,11 +1047,10 @@ static void ppp_setup(struct net_device *dev)
* Called to do any work queued up on the transmit side
* that can now be done.
*/
static int
static void
ppp_xmit_process(struct ppp *ppp)
{
struct sk_buff *skb;
int ret = 0;
ppp_xmit_lock(ppp);
if (!ppp->closing) {
@@ -1062,13 +1060,12 @@ ppp_xmit_process(struct ppp *ppp)
ppp_send_frame(ppp, skb);
/* If there's no work left to do, tell the core net
code that we can accept some more. */
if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) {
if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
netif_wake_queue(ppp->dev);
ret = 1;
}
else
netif_stop_queue(ppp->dev);
}
ppp_xmit_unlock(ppp);
return ret;
}
static inline struct sk_buff *
......
@@ -238,7 +238,7 @@ static void handle_tx(struct vhost_net *net)
vq->heads[vq->upend_idx].len = len;
ubuf->callback = vhost_zerocopy_callback;
ubuf->arg = vq->ubufs;
ubuf->ctx = vq->ubufs;
ubuf->desc = vq->upend_idx;
msg.msg_control = ubuf;
msg.msg_controllen = sizeof(ubuf);
......
@@ -1598,10 +1598,9 @@ void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
kfree(ubufs);
}
void vhost_zerocopy_callback(void *arg)
void vhost_zerocopy_callback(struct ubuf_info *ubuf)
{
struct ubuf_info *ubuf = arg;
struct vhost_ubuf_ref *ubufs = ubuf->arg;
struct vhost_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
/* set len = 1 to mark this desc buffers done DMA */
......
@@ -188,7 +188,7 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len);
void vhost_zerocopy_callback(void *arg);
void vhost_zerocopy_callback(struct ubuf_info *);
int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);
#define vq_err(vq, fmt, ...) do { \
......
@@ -238,11 +238,12 @@ enum {
/*
* The callback notifies userspace to release buffers when skb DMA is done in
* lower device, the skb last reference should be 0 when calling this.
* The desc is used to track userspace buffer index.
* The ctx field is used to track device context.
* The desc field is used to track userspace buffer index.
*/
struct ubuf_info {
void (*callback)(void *);
void *arg;
void (*callback)(struct ubuf_info *);
void *ctx;
unsigned long desc;
};
......
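
The ubuf_info comment in the hunk above describes the reworked zerocopy completion API: the callback now receives the ubuf_info pointer itself, ctx carries device context (vhost stores its vhost_ubuf_ref there), and desc tracks the userspace buffer index. A minimal sketch of a completion callback written against that layout; the struct definition mirrors the hunk above, while example_ctx and the example_ functions are hypothetical stand-ins for the vhost types:

/* Stand-alone sketch; struct ubuf_info mirrors the definition shown in the
 * hunk above, and example_ctx is a hypothetical device-context type. */
struct ubuf_info {
	void (*callback)(struct ubuf_info *);
	void *ctx;		/* device context (formerly the untyped 'arg') */
	unsigned long desc;	/* userspace buffer index */
};

struct example_ctx {
	int buffers_pending;
};

static void example_zerocopy_callback(struct ubuf_info *ubuf)
{
	struct example_ctx *ctx = ubuf->ctx;	/* recover the device context */

	ctx->buffers_pending--;			/* DMA for buffer ubuf->desc is done */
}

static void example_setup(struct ubuf_info *ubuf, struct example_ctx *ctx,
			  unsigned long index)
{
	ubuf->callback = example_zerocopy_callback;
	ubuf->ctx = ctx;
	ubuf->desc = index;
}
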
@@ -36,7 +36,11 @@ struct dst_entry {
struct net_device *dev;
struct dst_ops *ops;
unsigned long _metrics;
unsigned long expires;
union {
unsigned long expires;
/* point to where the dst_entry copied from */
struct dst_entry *from;
};
struct dst_entry *path;
struct neighbour __rcu *_neighbour;
#ifdef CONFIG_XFRM
......
@@ -123,6 +123,48 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
return ((struct rt6_info *)dst)->rt6i_idev;
}
static inline void rt6_clean_expires(struct rt6_info *rt)
{
if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
dst_release(rt->dst.from);
rt->rt6i_flags &= ~RTF_EXPIRES;
rt->dst.expires = 0;
}
static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
{
if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
dst_release(rt->dst.from);
rt->rt6i_flags |= RTF_EXPIRES;
rt->dst.expires = expires;
}
static inline void rt6_update_expires(struct rt6_info *rt, int timeout)
{
if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
dst_release(rt->dst.from);
dst_set_expires(&rt->dst, timeout);
rt->rt6i_flags |= RTF_EXPIRES;
}
static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
{
struct dst_entry *new = (struct dst_entry *) from;
if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) {
if (new == rt->dst.from)
return;
dst_release(rt->dst.from);
}
rt->rt6i_flags &= ~RTF_EXPIRES;
rt->dst.from = new;
dst_hold(new);
}
struct fib6_walker_t {
struct list_head lh;
struct fib6_node *root, *node;
......
@@ -802,8 +802,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
ip6_del_rt(rt);
rt = NULL;
} else if (!(rt->rt6i_flags & RTF_EXPIRES)) {
rt->dst.expires = expires;
rt->rt6i_flags |= RTF_EXPIRES;
rt6_set_expires(rt, expires);
}
}
dst_release(&rt->dst);
@@ -1886,11 +1885,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
rt = NULL;
} else if (addrconf_finite_timeout(rt_expires)) {
/* not infinity */
rt->dst.expires = jiffies + rt_expires;
rt->rt6i_flags |= RTF_EXPIRES;
rt6_set_expires(rt, jiffies + rt_expires);
} else {
rt->rt6i_flags &= ~RTF_EXPIRES;
rt->dst.expires = 0;
rt6_clean_expires(rt);
}
} else if (valid_lft) {
clock_t expires = 0;
......
@@ -673,11 +673,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
&rt->rt6i_gateway)) {
if (!(iter->rt6i_flags & RTF_EXPIRES))
return -EEXIST;
iter->dst.expires = rt->dst.expires;
if (!(rt->rt6i_flags & RTF_EXPIRES)) {
iter->rt6i_flags &= ~RTF_EXPIRES;
iter->dst.expires = 0;
}
if (!(rt->rt6i_flags & RTF_EXPIRES))
rt6_clean_expires(iter);
else
rt6_set_expires(iter, rt->dst.expires);
return -EEXIST;
}
}
......
@@ -1267,8 +1267,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
}
if (rt)
rt->dst.expires = jiffies + (HZ * lifetime);
rt6_set_expires(rt, jiffies + (HZ * lifetime));
if (ra_msg->icmph.icmp6_hop_limit) {
in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
if (rt)
......
@@ -62,7 +62,7 @@
#include <linux/sysctl.h>
#endif
static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
const struct in6_addr *dest);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
@@ -285,6 +285,10 @@ static void ip6_dst_destroy(struct dst_entry *dst)
rt->rt6i_idev = NULL;
in6_dev_put(idev);
}
if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from)
dst_release(dst->from);
if (peer) {
rt->rt6i_peer = NULL;
inet_putpeer(peer);
@@ -329,8 +333,17 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
static __inline__ int rt6_check_expired(const struct rt6_info *rt)
{
return (rt->rt6i_flags & RTF_EXPIRES) &&
time_after(jiffies, rt->dst.expires);
struct rt6_info *ort = NULL;
if (rt->rt6i_flags & RTF_EXPIRES) {
if (time_after(jiffies, rt->dst.expires))
return 1;
} else if (rt->dst.from) {
ort = (struct rt6_info *) rt->dst.from;
return (ort->rt6i_flags & RTF_EXPIRES) &&
time_after(jiffies, ort->dst.expires);
}
return 0;
}
static inline int rt6_need_strict(const struct in6_addr *daddr)
@@ -620,12 +633,11 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
(rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
if (rt) {
if (!addrconf_finite_timeout(lifetime)) {
rt->rt6i_flags &= ~RTF_EXPIRES;
} else {
rt->dst.expires = jiffies + HZ * lifetime;
rt->rt6i_flags |= RTF_EXPIRES;
}
if (!addrconf_finite_timeout(lifetime))
rt6_clean_expires(rt);
else
rt6_set_expires(rt, jiffies + HZ * lifetime);
dst_release(&rt->dst);
}
return 0;
@@ -730,7 +742,7 @@ int ip6_ins_rt(struct rt6_info *rt)
return __ip6_ins_rt(rt, &info);
}
static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
@@ -954,10 +966,10 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
rt->rt6i_idev = ort->rt6i_idev;
if (rt->rt6i_idev)
in6_dev_hold(rt->rt6i_idev);
rt->dst.expires = 0;
rt->rt6i_gateway = ort->rt6i_gateway;
rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
rt->rt6i_flags = ort->rt6i_flags;
rt6_clean_expires(rt);
rt->rt6i_metric = 0;
memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@ -1019,10 +1031,9 @@ static void ip6_link_failure(struct sk_buff *skb)
rt = (struct rt6_info *) skb_dst(skb);
if (rt) {
if (rt->rt6i_flags & RTF_CACHE) {
dst_set_expires(&rt->dst, 0);
rt->rt6i_flags |= RTF_EXPIRES;
} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
if (rt->rt6i_flags & RTF_CACHE)
rt6_update_expires(rt, 0);
else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
rt->rt6i_node->fn_sernum = -1;
}
}
@@ -1289,9 +1300,12 @@ int ip6_route_add(struct fib6_config *cfg)
}
rt->dst.obsolete = -1;
rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ?
jiffies + clock_t_to_jiffies(cfg->fc_expires) :
0;
if (cfg->fc_flags & RTF_EXPIRES)
rt6_set_expires(rt, jiffies +
clock_t_to_jiffies(cfg->fc_expires));
else
rt6_clean_expires(rt);
if (cfg->fc_protocol == RTPROT_UNSPEC)
cfg->fc_protocol = RTPROT_BOOT;
@@ -1736,8 +1750,8 @@ static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr
features |= RTAX_FEATURE_ALLFRAG;
dst_metric_set(&rt->dst, RTAX_FEATURES, features);
}
dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
rt->rt6i_flags |= RTF_MODIFIED;
goto out;
}
@@ -1765,9 +1779,8 @@ static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr
* which is 10 mins. After 10 mins the decreased pmtu is expired
* and detecting PMTU increase will be automatically happened.
*/
dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires);
nrt->rt6i_flags |= RTF_DYNAMIC;
ip6_ins_rt(nrt);
}
out:
@@ -1799,7 +1812,7 @@ void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *sad
* Misc support functions
*/
static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
const struct in6_addr *dest)
{
struct net *net = dev_net(ort->dst.dev);
@@ -1819,10 +1832,14 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
if (rt->rt6i_idev)
in6_dev_hold(rt->rt6i_idev);
rt->dst.lastuse = jiffies;
rt->dst.expires = 0;
rt->rt6i_gateway = ort->rt6i_gateway;
rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
rt->rt6i_flags = ort->rt6i_flags;
if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
(RTF_DEFAULT | RTF_ADDRCONF))
rt6_set_from(rt, ort);
else
rt6_clean_expires(rt);
rt->rt6i_metric = 0;
#ifdef CONFIG_IPV6_SUBTREES
......