Commit 6395ad85 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) The real fix for the ipv6 route metric leak Sabrina was seeing, from
    Cong Wang.

 2) Fix AF_PACKET v3 ring buffer insufficient-room conditions that syzbot
    was triggering, from Willem de Bruijn.

 3) vsock could reinitialize an active delayed work struct, fix from Cong Wang.

 4) RXRPC keepalive generator can wedge a cpu, fix from David Howells.

 5) Fix locking in AF_SMC ioctl, from Ursula Braun.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  dsa: slave: eee: Allow ports to use phylink
  net/smc: move sock lock in smc_ioctl()
  net/smc: allow sysctl rmem and wmem defaults for servers
  net/smc: no shutdown in state SMC_LISTEN
  net: aquantia: Fix IFF_ALLMULTI flag functionality
  rxrpc: Fix the keepalive generator [ver #2]
  net/mlx5e: Cleanup of dcbnl related fields
  net/mlx5e: Properly check if hairpin is possible between two functions
  vhost: reset metadata cache when initializing new IOTLB
  llc: use refcount_inc_not_zero() for llc_sap_find()
  dccp: fix undefined behavior with 'cwnd' shift in ccid2_cwnd_restart()
  tipc: fix an interrupt unsafe locking scenario
  vsock: split dwork to avoid reinitializations
  net: thunderx: check for failed allocation lmac->dmacs
  cxgb4: mk_act_open_req() buggers ->{local, peer}_ip on big-endian hosts
  packet: refine ring v3 block size test to hold one frame
  ip6_tunnel: use the right value for ipv4 min mtu check in ip6_tnl_xmit
  ipv6: fix double refcount of fib6_metrics
parents fedb8da9 1be52e97
@@ -762,7 +762,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
 	hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
 	hw_atl_rpfl2multicast_flr_en_set(self,
-					 IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+					 IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
 	hw_atl_rpfl2_accept_all_mc_packets_set(self,
 					       IS_FILTER_ENABLED(IFF_ALLMULTI));
......
@@ -1083,6 +1083,8 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 	lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
 	lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
 			      GFP_KERNEL);
+	if (!lmac->dmacs)
+		return -ENOMEM;
 
 	/* Enable lmac */
 	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
......
@@ -1038,10 +1038,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
 	req->local_port = cpu_to_be16(f->fs.val.lport);
 	req->peer_port = cpu_to_be16(f->fs.val.fport);
-	req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
-			f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
-	req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
-		       f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+	memcpy(&req->local_ip, f->fs.val.lip, 4);
+	memcpy(&req->peer_ip, f->fs.val.fip, 4);
 	req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
 					f->fs.newvlan == VLAN_REWRITE) |
 				DELACK_V(f->fs.hitcnts) |
......
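The cxgb4 fix matters because assembling the address byte-by-byte with shifts produces a host-endian integer, while lip[]/fip[] already hold the address in network byte order; on big-endian hosts the shifts reverse the bytes. A minimal standalone sketch of the difference (the function names here are illustrative, not the driver's):

	#include <stdint.h>
	#include <string.h>

	/* ip[] holds an IPv4 address in network byte order, e.g. {192, 0, 2, 1}. */

	/* Host-endian assembly: only matches the wire layout on little-endian CPUs. */
	static uint32_t ip_shifted(const uint8_t *ip)
	{
		return ip[0] | ip[1] << 8 | ip[2] << 16 | ip[3] << 24;
	}

	/* memcpy() preserves the byte order of the source buffer on any CPU. */
	static uint32_t ip_copied(const uint8_t *ip)
	{
		uint32_t v;

		memcpy(&v, ip, 4);
		return v;
	}

On little-endian hosts both return the same bits; on big-endian hosts ip_shifted() hands the hardware a byte-reversed address, which is exactly what the memcpy() conversion above removes.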
@@ -858,8 +858,6 @@ struct mlx5e_profile {
 		mlx5e_fp_handle_rx_cqe handle_rx_cqe;
 		mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
 	} rx_handlers;
-	void	(*netdev_registered_init)(struct mlx5e_priv *priv);
-	void	(*netdev_registered_remove)(struct mlx5e_priv *priv);
 	int	max_tc;
 };
......
@@ -443,16 +443,12 @@ static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
 	bool is_new;
 	int err;
 
-	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
-		return -EINVAL;
-
-	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
-		return -EINVAL;
-
-	if (!MLX5_DSCP_SUPPORTED(priv->mdev))
-		return -EINVAL;
+	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
+	    !MLX5_DSCP_SUPPORTED(priv->mdev))
+		return -EOPNOTSUPP;
 
-	if (app->protocol >= MLX5E_MAX_DSCP)
+	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+	    (app->protocol >= MLX5E_MAX_DSCP))
 		return -EINVAL;
 
 	/* Save the old entry info */
@@ -500,16 +496,12 @@ static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int err;
 
-	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
-		return -EINVAL;
-
-	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
-		return -EINVAL;
-
-	if (!MLX5_DSCP_SUPPORTED(priv->mdev))
-		return -EINVAL;
+	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
+	    !MLX5_DSCP_SUPPORTED(priv->mdev))
+		return -EOPNOTSUPP;
 
-	if (app->protocol >= MLX5E_MAX_DSCP)
+	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+	    (app->protocol >= MLX5E_MAX_DSCP))
 		return -EINVAL;
 
 	/* Skip if no dscp app entry */
@@ -1146,7 +1138,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
 {
 	int err;
 
-	err =  mlx5_set_trust_state(priv->mdev, trust_state);
+	err = mlx5_set_trust_state(priv->mdev, trust_state);
 	if (err)
 		return err;
 	priv->dcbx_dp.trust_state = trust_state;
@@ -1970,15 +1970,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
 {
 	struct mlx5_core_dev *fmdev, *pmdev;
-	u16 func_id, peer_id;
+	u64 fsystem_guid, psystem_guid;
 
 	fmdev = priv->mdev;
 	pmdev = peer_priv->mdev;
 
-	func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
-	peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+	mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid);
+	mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid);
 
-	return (func_id == peer_id);
+	return (fsystem_guid == psystem_guid);
 }
 
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
......
@@ -1560,9 +1560,12 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
 	d->iotlb = niotlb;
 
 	for (i = 0; i < d->nvqs; ++i) {
-		mutex_lock(&d->vqs[i]->mutex);
-		d->vqs[i]->iotlb = niotlb;
-		mutex_unlock(&d->vqs[i]->mutex);
+		struct vhost_virtqueue *vq = d->vqs[i];
+
+		mutex_lock(&vq->mutex);
+		vq->iotlb = niotlb;
+		__vhost_vq_meta_reset(vq);
+		mutex_unlock(&vq->mutex);
 	}
 
 	vhost_umem_clean(oiotlb);
......
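The vhost change works because each virtqueue keeps cached translations of its metadata that were computed against the old IOTLB; swapping in a new IOTLB without flushing that cache leaves stale pointers live, which is why __vhost_vq_meta_reset() is now called under the queue mutex. A rough userspace sketch of the invalidation pattern, with hypothetical names:

	/* Hypothetical illustration of a lookup cache keyed to a translation table. */
	struct xlat_cache {
		const void *table;   /* table the cached entries were derived from */
		void *cached[4];     /* translated pointers, valid only for *table */
	};

	static void xlat_cache_set_table(struct xlat_cache *c, const void *new_table)
	{
		/* Invalidate everything derived from the old table before the swap. */
		for (int i = 0; i < 4; i++)
			c->cached[i] = 0;
		c->table = new_table;
	}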
@@ -64,7 +64,8 @@ struct vsock_sock {
 	struct list_head pending_links;
 	struct list_head accept_queue;
 	bool rejected;
-	struct delayed_work dwork;
+	struct delayed_work connect_work;
+	struct delayed_work pending_work;
 	struct delayed_work close_work;
 	bool close_work_scheduled;
 	u32 peer_shutdown;
@@ -77,7 +78,6 @@ struct vsock_sock {
 s64 vsock_stream_has_data(struct vsock_sock *vsk);
 s64 vsock_stream_has_space(struct vsock_sock *vsk);
-void vsock_pending_work(struct work_struct *work);
 struct sock *__vsock_create(struct net *net,
 			    struct socket *sock,
 			    struct sock *parent,
......
@@ -116,6 +116,11 @@ static inline void llc_sap_hold(struct llc_sap *sap)
 	refcount_inc(&sap->refcnt);
 }
 
+static inline bool llc_sap_hold_safe(struct llc_sap *sap)
+{
+	return refcount_inc_not_zero(&sap->refcnt);
+}
+
 void llc_sap_close(struct llc_sap *sap);
 
 static inline void llc_sap_put(struct llc_sap *sap)
......
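llc_sap_hold_safe() exists for lookups under RCU: between finding the object and taking a reference, the last reference may be dropped, so the lookup must take a reference only if the count is still non-zero. refcount_inc_not_zero() does that atomically, and the caller treats failure as "not found", as llc_sap_find() does below. A generic userspace sketch of the same pattern, outside the kernel's refcount and RCU machinery:

	#include <stdatomic.h>
	#include <stdbool.h>

	struct obj {
		atomic_int refcnt;
	};

	/* Take a reference only if the object is still live (refcnt > 0). */
	static bool get_obj_maybe(struct obj *o)
	{
		int c = atomic_load(&o->refcnt);

		while (c != 0) {
			/* On failure, c is reloaded with the current count. */
			if (atomic_compare_exchange_weak(&o->refcnt, &c, c + 1))
				return true;   /* won the race, reference taken */
		}
		return false;                  /* already dying, pretend it's gone */
	}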
@@ -229,14 +229,16 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
 	u32 cwnd = hc->tx_cwnd, restart_cwnd,
 	    iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
+	s32 delta = now - hc->tx_lsndtime;
 
 	hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
 
 	/* don't reduce cwnd below the initial window (IW) */
 	restart_cwnd = min(cwnd, iwnd);
-	cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto;
-	hc->tx_cwnd = max(cwnd, restart_cwnd);
+
+	while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
+		cwnd >>= 1;
+	hc->tx_cwnd = max(cwnd, restart_cwnd);
 
 	hc->tx_cwnd_stamp = now;
 	hc->tx_cwnd_used  = 0;
......
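The dccp fix addresses undefined behaviour: after a long idle period, (now - hc->tx_lsndtime) / hc->tx_rto can reach 32 or more, and shifting a u32 by a count greater than or equal to its width is undefined in C (on x86 it typically behaves like a shift by count % 32, leaving cwnd too large instead of collapsing it). Halving in a bounded loop that stops at the restart window never shifts by more than 1. A minimal standalone demonstration of the hazard:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t cwnd = 64;
		unsigned idle_periods = 40;     /* e.g. (now - last_send) / rto */
		uint32_t restart = 4;           /* don't go below the initial window */

		/* Undefined: shift count >= width of the type.
		 * On x86 this commonly acts like cwnd >> (40 % 32) == cwnd >> 8. */
		/* uint32_t bad = cwnd >> idle_periods; */

		/* Well-defined: halve step by step and clamp. */
		while (idle_periods-- && cwnd > restart)
			cwnd >>= 1;
		printf("%u\n", cwnd > restart ? cwnd : restart);
		return 0;
	}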
@@ -639,7 +639,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
 	int ret;
 
 	/* Port's PHY and MAC both need to be EEE capable */
-	if (!dev->phydev)
+	if (!dev->phydev && !dp->pl)
 		return -ENODEV;
 
 	if (!ds->ops->set_mac_eee)
@@ -659,7 +659,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
 	int ret;
 
 	/* Port's PHY and MAC both need to be EEE capable */
-	if (!dev->phydev)
+	if (!dev->phydev && !dp->pl)
 		return -ENODEV;
 
 	if (!ds->ops->get_mac_eee)
......
@@ -1133,12 +1133,8 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 		max_headroom += 8;
 		mtu -= 8;
 	}
-	if (skb->protocol == htons(ETH_P_IPV6)) {
-		if (mtu < IPV6_MIN_MTU)
-			mtu = IPV6_MIN_MTU;
-	} else if (mtu < 576) {
-		mtu = 576;
-	}
+	mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
+		       IPV6_MIN_MTU : IPV4_MIN_MTU);
 
 	skb_dst_update_pmtu(skb, mtu);
 	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
......
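The ip6_tunnel change keeps the same clamp for IPv6 payloads but, for IPv4, replaces the bare 576 (the minimum datagram size IPv4 hosts must be able to reassemble, often mistaken for a link MTU floor) with IPV4_MIN_MTU (68, the actual minimum MTU from RFC 791), expressed as a single max(). A condensed view of the logic, assuming the two kernel constants:

	/* Values per RFC 8200 and RFC 791; defined in the kernel headers. */
	#define IPV6_MIN_MTU 1280
	#define IPV4_MIN_MTU 68

	static unsigned int clamp_tunnel_mtu(unsigned int mtu, int payload_is_ipv6)
	{
		unsigned int floor = payload_is_ipv6 ? IPV6_MIN_MTU : IPV4_MIN_MTU;

		return mtu > floor ? mtu : floor;   /* i.e. max(mtu, floor) */
	}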
@@ -978,10 +978,6 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
 	rt->rt6i_flags &= ~RTF_EXPIRES;
 	rcu_assign_pointer(rt->from, from);
 	dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
-	if (from->fib6_metrics != &dst_default_metrics) {
-		rt->dst._metrics |= DST_METRICS_REFCOUNTED;
-		refcount_inc(&from->fib6_metrics->refcnt);
-	}
 }
 
 /* Caller must already hold reference to @ort */
......
@@ -73,8 +73,8 @@ struct llc_sap *llc_sap_find(unsigned char sap_value)
 
 	rcu_read_lock_bh();
 	sap = __llc_sap_find(sap_value);
-	if (sap)
-		llc_sap_hold(sap);
+	if (!sap || !llc_sap_hold_safe(sap))
+		sap = NULL;
 	rcu_read_unlock_bh();
 	return sap;
 }
......
@@ -4226,6 +4226,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 	}
 
 	if (req->tp_block_nr) {
+		unsigned int min_frame_size;
+
 		/* Sanity tests and some calculations */
 		err = -EBUSY;
 		if (unlikely(rb->pg_vec))
@@ -4248,12 +4250,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 			goto out;
 		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
 			goto out;
+		min_frame_size = po->tp_hdrlen + po->tp_reserve;
 		if (po->tp_version >= TPACKET_V3 &&
-		    req->tp_block_size <=
-		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
+		    req->tp_block_size <
+		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
 			goto out;
-		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
-					po->tp_reserve))
+		if (unlikely(req->tp_frame_size < min_frame_size))
 			goto out;
 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
 			goto out;
......
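The packet fix tightens the TPACKET_V3 sanity check so a ring block is only accepted if it can hold at least one complete frame: the per-block private area and block header (BLK_PLUS_PRIV) plus the frame overhead tp_hdrlen + tp_reserve. Roughly, the invariant the new test enforces is:

	tp_block_size >= BLK_PLUS_PRIV(tp_sizeof_priv) + tp_hdrlen + tp_reserve

A hedged sketch of the same validation, with the kernel macro replaced by a plain parameter:

	#include <stdbool.h>
	#include <stdint.h>

	/* blk_overhead stands in for BLK_PLUS_PRIV(tp_sizeof_priv). */
	static bool v3_block_size_ok(uint64_t tp_block_size, uint64_t blk_overhead,
				     uint32_t tp_hdrlen, uint32_t tp_reserve)
	{
		uint64_t min_frame_size = (uint64_t)tp_hdrlen + tp_reserve;

		/* A block that cannot fit even one frame would let the fill
		 * loop run past the block boundary (the syzbot report). */
		return tp_block_size >= blk_overhead + min_frame_size;
	}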
@@ -104,9 +104,9 @@ struct rxrpc_net {
 #define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
 	u8			peer_keepalive_cursor;
-	ktime_t			peer_keepalive_base;
-	struct hlist_head	peer_keepalive[RXRPC_KEEPALIVE_TIME + 1];
-	struct hlist_head	peer_keepalive_new;
+	time64_t		peer_keepalive_base;
+	struct list_head	peer_keepalive[32];
+	struct list_head	peer_keepalive_new;
 	struct timer_list	peer_keepalive_timer;
 	struct work_struct	peer_keepalive_work;
 };
@@ -295,7 +295,7 @@ struct rxrpc_peer {
 	struct hlist_head	error_targets;	/* targets for net error distribution */
 	struct work_struct	error_distributor;
 	struct rb_root		service_conns;	/* Service connections */
-	struct hlist_node	keepalive_link;	/* Link in net->peer_keepalive[] */
+	struct list_head	keepalive_link;	/* Link in net->peer_keepalive[] */
 	time64_t		last_tx_at;	/* Last time packet sent here */
 	seqlock_t		service_conn_lock;
 	spinlock_t		lock;		/* access lock */
......
@@ -136,7 +136,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 	}
 
 	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 	if (ret < 0)
 		trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
 				    rxrpc_tx_fail_call_final_resend);
@@ -245,7 +245,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 		return -EAGAIN;
 	}
 
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 	_leave(" = 0");
 	return 0;
......
@@ -85,12 +85,12 @@ static __net_init int rxrpc_init_net(struct net *net)
 	hash_init(rxnet->peer_hash);
 	spin_lock_init(&rxnet->peer_hash_lock);
 	for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
-		INIT_HLIST_HEAD(&rxnet->peer_keepalive[i]);
-	INIT_HLIST_HEAD(&rxnet->peer_keepalive_new);
+		INIT_LIST_HEAD(&rxnet->peer_keepalive[i]);
+	INIT_LIST_HEAD(&rxnet->peer_keepalive_new);
 	timer_setup(&rxnet->peer_keepalive_timer,
 		    rxrpc_peer_keepalive_timeout, 0);
 	INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
-	rxnet->peer_keepalive_base = ktime_add(ktime_get_real(), NSEC_PER_SEC);
+	rxnet->peer_keepalive_base = ktime_get_seconds();
 
 	ret = -ENOMEM;
 	rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
......
@@ -209,7 +209,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 	now = ktime_get_real();
 	if (ping)
 		call->ping_time = now;
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 	if (ret < 0)
 		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
 				    rxrpc_tx_fail_call_ack);
@@ -296,7 +296,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
 
 	ret = kernel_sendmsg(conn->params.local->socket,
 			     &msg, iov, 1, sizeof(pkt));
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 	if (ret < 0)
 		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
 				    rxrpc_tx_fail_call_abort);
@@ -391,7 +391,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 	 *     message and update the peer record
 	 */
 	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 
 	up_read(&conn->params.local->defrag_sem);
 	if (ret < 0)
@@ -457,7 +457,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 		if (ret == 0) {
 			ret = kernel_sendmsg(conn->params.local->socket, &msg,
 					     iov, 2, len);
-			conn->params.peer->last_tx_at = ktime_get_real();
+			conn->params.peer->last_tx_at = ktime_get_seconds();
 
 			opt = IP_PMTUDISC_DO;
 			kernel_setsockopt(conn->params.local->socket, SOL_IP,
@@ -475,7 +475,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 		if (ret == 0) {
 			ret = kernel_sendmsg(conn->params.local->socket, &msg,
 					     iov, 2, len);
-			conn->params.peer->last_tx_at = ktime_get_real();
+			conn->params.peer->last_tx_at = ktime_get_seconds();
 
 			opt = IPV6_PMTUDISC_DO;
 			kernel_setsockopt(conn->params.local->socket,
@@ -599,6 +599,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
 		trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
 				    rxrpc_tx_fail_version_keepalive);
 
-	peer->last_tx_at = ktime_get_real();
+	peer->last_tx_at = ktime_get_seconds();
 	_leave("");
 }
@@ -350,97 +350,117 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
 }
 
 /*
- * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ * Perform keep-alive pings.
 */
-void rxrpc_peer_keepalive_worker(struct work_struct *work)
+static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+					  struct list_head *collector,
+					  time64_t base,
+					  u8 cursor)
 {
-	struct rxrpc_net *rxnet =
-		container_of(work, struct rxrpc_net, peer_keepalive_work);
 	struct rxrpc_peer *peer;
-	unsigned long delay;
-	ktime_t base, now = ktime_get_real();
-	s64 diff;
-	u8 cursor, slot;
+	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+	time64_t keepalive_at;
+	int slot;
 
-	base = rxnet->peer_keepalive_base;
-	cursor = rxnet->peer_keepalive_cursor;
+	spin_lock_bh(&rxnet->peer_hash_lock);
 
-	_enter("%u,%lld", cursor, ktime_sub(now, base));
+	while (!list_empty(collector)) {
+		peer = list_entry(collector->next,
+				  struct rxrpc_peer, keepalive_link);
 
-next_bucket:
-	diff = ktime_to_ns(ktime_sub(now, base));
-	if (diff < 0)
-		goto resched;
+		list_del_init(&peer->keepalive_link);
+		if (!rxrpc_get_peer_maybe(peer))
+			continue;
 
-	_debug("at %u", cursor);
-	spin_lock_bh(&rxnet->peer_hash_lock);
-next_peer:
-	if (!rxnet->live) {
 		spin_unlock_bh(&rxnet->peer_hash_lock);
-		goto out;
-	}
 
-	/* Everything in the bucket at the cursor is processed this second; the
-	 * bucket at cursor + 1 goes now + 1s and so on...
-	 */
-	if (hlist_empty(&rxnet->peer_keepalive[cursor])) {
-		if (hlist_empty(&rxnet->peer_keepalive_new)) {
-			spin_unlock_bh(&rxnet->peer_hash_lock);
-			goto emptied_bucket;
+		keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+		slot = keepalive_at - base;
+		_debug("%02x peer %u t=%d {%pISp}",
+		       cursor, peer->debug_id, slot, &peer->srx.transport);
+
+		if (keepalive_at <= base ||
+		    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
+			rxrpc_send_keepalive(peer);
+			slot = RXRPC_KEEPALIVE_TIME;
 		}
 
-		hlist_move_list(&rxnet->peer_keepalive_new,
-				&rxnet->peer_keepalive[cursor]);
+		/* A transmission to this peer occurred since last we examined
+		 * it so put it into the appropriate future bucket.
+		 */
+		slot += cursor;
+		slot &= mask;
+		spin_lock_bh(&rxnet->peer_hash_lock);
+		list_add_tail(&peer->keepalive_link,
+			      &rxnet->peer_keepalive[slot & mask]);
+		rxrpc_put_peer(peer);
 	}
 
-	peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
-			   struct rxrpc_peer, keepalive_link);
-	hlist_del_init(&peer->keepalive_link);
-	if (!rxrpc_get_peer_maybe(peer))
-		goto next_peer;
-
 	spin_unlock_bh(&rxnet->peer_hash_lock);
+}
 
-	_debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport);
+/*
+ * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ */
+void rxrpc_peer_keepalive_worker(struct work_struct *work)
+{
+	struct rxrpc_net *rxnet =
+		container_of(work, struct rxrpc_net, peer_keepalive_work);
+	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+	time64_t base, now, delay;
+	u8 cursor, stop;
+	LIST_HEAD(collector);
 
-recalc:
-	diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC);
-	if (diff < -30 || diff > 30)
-		goto send; /* LSW of 64-bit time probably wrapped on 32-bit */
-	diff += RXRPC_KEEPALIVE_TIME - 1;
-	if (diff < 0)
-		goto send;
+	now = ktime_get_seconds();
+	base = rxnet->peer_keepalive_base;
+	cursor = rxnet->peer_keepalive_cursor;
+	_enter("%lld,%u", base - now, cursor);
 
-	slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff;
-	if (slot == 0)
-		goto send;
+	if (!rxnet->live)
+		return;
 
-	/* A transmission to this peer occurred since last we examined it so
-	 * put it into the appropriate future bucket.
+	/* Remove to a temporary list all the peers that are currently lodged
+	 * in expired buckets plus all new peers.
+	 *
+	 * Everything in the bucket at the cursor is processed this
+	 * second; the bucket at cursor + 1 goes at now + 1s and so
+	 * on...
 	 */
-	slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
 	spin_lock_bh(&rxnet->peer_hash_lock);
-	hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]);
-	rxrpc_put_peer(peer);
-	goto next_peer;
-
-send:
-	rxrpc_send_keepalive(peer);
-	now = ktime_get_real();
-	goto recalc;
+	list_splice_init(&rxnet->peer_keepalive_new, &collector);
+
+	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
+	while (base <= now && (s8)(cursor - stop) < 0) {
+		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
+				      &collector);
+		base++;
+		cursor++;
+	}
 
-emptied_bucket:
-	cursor++;
-	if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
-		cursor = 0;
-	base = ktime_add_ns(base, NSEC_PER_SEC);
-	goto next_bucket;
+	base = now;
 
 	spin_unlock_bh(&rxnet->peer_hash_lock);
 
-resched:
 	rxnet->peer_keepalive_base = base;
 	rxnet->peer_keepalive_cursor = cursor;
-	delay = nsecs_to_jiffies(-diff) + 1;
-	timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
-out:
+	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
+	ASSERT(list_empty(&collector));
+
+	/* Schedule the timer for the next occupied timeslot. */
+	cursor = rxnet->peer_keepalive_cursor;
+	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
+	for (; (s8)(cursor - stop) < 0; cursor++) {
+		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
+			break;
+		base++;
+	}
+
+	now = ktime_get_seconds();
+	delay = base - now;
+	if (delay < 1)
+		delay = 1;
+	delay *= HZ;
+	if (rxnet->live)
+		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
+
 	_leave("");
 }
@@ -322,7 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
 	if (!peer) {
 		peer = prealloc;
 		hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
-		hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive_new);
+		list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
 	}
 
 	spin_unlock(&rxnet->peer_hash_lock);
@@ -367,8 +367,8 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
 		if (!peer) {
 			hash_add_rcu(rxnet->peer_hash,
 				     &candidate->hash_link, hash_key);
-			hlist_add_head(&candidate->keepalive_link,
-				       &rxnet->peer_keepalive_new);
+			list_add_tail(&candidate->keepalive_link,
+				      &rxnet->peer_keepalive_new);
 		}
 
 		spin_unlock_bh(&rxnet->peer_hash_lock);
@@ -441,7 +441,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
 
 	spin_lock_bh(&rxnet->peer_hash_lock);
 	hash_del_rcu(&peer->hash_link);
-	hlist_del_init(&peer->keepalive_link);
+	list_del_init(&peer->keepalive_link);
 	spin_unlock_bh(&rxnet->peer_hash_lock);
 
 	kfree_rcu(peer, rcu);
......
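The rxrpc rework replaces per-peer time arithmetic in the worker with a classic timing wheel: peer_keepalive[] is a power-of-two array of buckets (32, comfortably larger than the 20-second RXRPC_KEEPALIVE_TIME), one bucket per second, indexed by cursor & mask. Each pass splices every expired bucket plus the new-peer list onto a collector, pings whatever is overdue, and re-files each peer (last_tx_at + RXRPC_KEEPALIVE_TIME) - base slots ahead; the old goto-driven loop could wedge a CPU because it re-derived its delay from a ktime difference that could stay negative. A toy timing wheel in the same shape (hypothetical types, no locking):

	#include <stdint.h>

	#define WHEEL_SIZE 32          /* power of two, > longest delay in seconds */
	#define WHEEL_MASK (WHEEL_SIZE - 1)

	struct item { struct item *next; int64_t fire_at; };

	struct wheel {
		struct item *bucket[WHEEL_SIZE];
		int64_t base;          /* time the cursor bucket represents */
		uint8_t cursor;
	};

	/* File an item into the bucket for its deadline, relative to the cursor. */
	static void wheel_add(struct wheel *w, struct item *it)
	{
		int64_t slot = it->fire_at - w->base;

		if (slot < 0)
			slot = 0;                  /* overdue: next tick */
		if (slot >= WHEEL_SIZE)
			slot = WHEEL_SIZE - 1;     /* clamp to the horizon */
		slot = (w->cursor + slot) & WHEEL_MASK;
		it->next = w->bucket[slot];
		w->bucket[slot] = it;
	}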
@@ -669,7 +669,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
 		return -EAGAIN;
 	}
 
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 	_leave(" = 0");
 	return 0;
 }
@@ -725,7 +725,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
 		return -EAGAIN;
 	}
 
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 	_leave(" = 0");
 	return 0;
 }
......
@@ -1122,6 +1122,8 @@ static void smc_tcp_listen_work(struct work_struct *work)
 		sock_hold(lsk); /* sock_put in smc_listen_work */
 		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
 		smc_copy_sock_settings_to_smc(new_smc);
+		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
+		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
 		sock_hold(&new_smc->sk); /* sock_put in passive closing */
 		if (!schedule_work(&new_smc->smc_listen_work))
 			sock_put(&new_smc->sk);
@@ -1397,8 +1399,7 @@ static int smc_shutdown(struct socket *sock, int how)
 	lock_sock(sk);
 
 	rc = -ENOTCONN;
-	if ((sk->sk_state != SMC_LISTEN) &&
-	    (sk->sk_state != SMC_ACTIVE) &&
+	if ((sk->sk_state != SMC_ACTIVE) &&
 	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
 	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
 	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
@@ -1521,12 +1522,16 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
 	smc = smc_sk(sock->sk);
 	conn = &smc->conn;
+	lock_sock(&smc->sk);
 	if (smc->use_fallback) {
-		if (!smc->clcsock)
+		if (!smc->clcsock) {
+			release_sock(&smc->sk);
 			return -EBADF;
-		return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
+		}
+		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
+		release_sock(&smc->sk);
+		return answ;
 	}
-	lock_sock(&smc->sk);
 	switch (cmd) {
 	case SIOCINQ: /* same as FIONREAD */
 		if (smc->sk.sk_state == SMC_LISTEN) {
......
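The smc_ioctl() change fixes the fallback path's locking: the socket lock is now taken before smc->use_fallback and smc->clcsock are inspected, and every early return releases it first, so the fields are never read unlocked and the lock is never leaked. The general rule being applied is that once a function takes a lock before its first branch, each return path must pair with a release. A compilable sketch of that shape, with hypothetical names standing in for the SMC types:

	#include <errno.h>
	#include <pthread.h>

	struct state {
		pthread_mutex_t lock;
		int fallback;
		int inner;          /* 0 here plays the role of a missing clcsock */
	};

	static int inner_op(struct state *st, unsigned int cmd) { (void)st; (void)cmd; return 0; }
	static int native_op(struct state *st, unsigned int cmd) { (void)st; (void)cmd; return 0; }

	/* One lock taken up front, a matching unlock on every exit,
	 * including the error return. */
	int op_locked(struct state *st, unsigned int cmd)
	{
		int ret;

		pthread_mutex_lock(&st->lock);
		if (st->fallback) {
			if (!st->inner) {
				pthread_mutex_unlock(&st->lock);
				return -EBADF;
			}
			ret = inner_op(st, cmd);        /* result saved... */
			pthread_mutex_unlock(&st->lock);
			return ret;    /* ...lock dropped before returning it */
		}
		ret = native_op(st, cmd);
		pthread_mutex_unlock(&st->lock);
		return ret;
	}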
@@ -123,15 +123,13 @@ void tipc_net_finalize(struct net *net, u32 addr)
 {
 	struct tipc_net *tn = tipc_net(net);
 
-	spin_lock_bh(&tn->node_list_lock);
-	if (!tipc_own_addr(net)) {
+	if (!cmpxchg(&tn->node_addr, 0, addr)) {
 		tipc_set_node_addr(net, addr);
 		tipc_named_reinit(net);
 		tipc_sk_reinit(net);
 		tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
 				     TIPC_CLUSTER_SCOPE, 0, addr);
 	}
-	spin_unlock_bh(&tn->node_list_lock);
 }
 
 void tipc_net_stop(struct net *net)
......
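The tipc fix swaps a spinlock-protected "check then set" for a single cmpxchg(): whichever context first swaps node_addr from 0 to the new address wins and runs the one-time initialization, with no lock left for an interrupt-context caller to deadlock on (the unsafe scenario in the commit title). A userspace rendering of the same once-only idiom:

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic uint32_t node_addr;

	/* The first caller to install a non-zero address runs the one-time
	 * setup. Losers see a non-zero old value and skip it; no lock is held. */
	static void net_finalize(uint32_t addr)
	{
		uint32_t expected = 0;

		if (atomic_compare_exchange_strong(&node_addr, &expected, addr)) {
			/* one-time initialization goes here, exactly once */
		}
	}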
@@ -451,14 +451,14 @@ static int vsock_send_shutdown(struct sock *sk, int mode)
 	return transport->shutdown(vsock_sk(sk), mode);
 }
 
-void vsock_pending_work(struct work_struct *work)
+static void vsock_pending_work(struct work_struct *work)
 {
 	struct sock *sk;
 	struct sock *listener;
 	struct vsock_sock *vsk;
 	bool cleanup;
 
-	vsk = container_of(work, struct vsock_sock, dwork.work);
+	vsk = container_of(work, struct vsock_sock, pending_work.work);
 	sk = sk_vsock(vsk);
 	listener = vsk->listener;
 	cleanup = true;
@@ -498,7 +498,6 @@ void vsock_pending_work(struct work_struct *work)
 	sock_put(sk);
 	sock_put(listener);
 }
-EXPORT_SYMBOL_GPL(vsock_pending_work);
 
 /**** SOCKET OPERATIONS ****/
@@ -597,6 +596,8 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
 	return retval;
 }
 
+static void vsock_connect_timeout(struct work_struct *work);
+
 struct sock *__vsock_create(struct net *net,
 			    struct socket *sock,
 			    struct sock *parent,
@@ -638,6 +639,8 @@ struct sock *__vsock_create(struct net *net,
 	vsk->sent_request = false;
 	vsk->ignore_connecting_rst = false;
 	vsk->peer_shutdown = 0;
+	INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
+	INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);
 
 	psk = parent ? vsock_sk(parent) : NULL;
 	if (parent) {
@@ -1117,7 +1120,7 @@ static void vsock_connect_timeout(struct work_struct *work)
 	struct vsock_sock *vsk;
 	int cancel = 0;
 
-	vsk = container_of(work, struct vsock_sock, dwork.work);
+	vsk = container_of(work, struct vsock_sock, connect_work.work);
 	sk = sk_vsock(vsk);
 
 	lock_sock(sk);
@@ -1221,9 +1224,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
 			 * timeout fires.
 			 */
 			sock_hold(sk);
-			INIT_DELAYED_WORK(&vsk->dwork,
-					  vsock_connect_timeout);
-			schedule_delayed_work(&vsk->dwork, timeout);
+			schedule_delayed_work(&vsk->connect_work, timeout);
 
 			/* Skip ahead to preserve error code set above. */
 			goto out_wait;
@@ -1094,8 +1094,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
 	vpending->listener = sk;
 	sock_hold(sk);
 	sock_hold(pending);
-	INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
-	schedule_delayed_work(&vpending->dwork, HZ);
+	schedule_delayed_work(&vpending->pending_work, HZ);
 
 out:
 	return err;
......
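The vsock crash came from one dwork field doing double duty: the same delayed work struct could be INIT_DELAYED_WORK()'d for the connect timeout and then re-initialized for pending work while the previous work was still queued, corrupting workqueue state. Giving each purpose its own field, both initialized exactly once in __vsock_create(), removes the re-initialization entirely. The rule of thumb in sketch form (hypothetical struct, real kernel API):

	#include <linux/workqueue.h>

	/* Hypothetical illustration: one work item per purpose, initialized
	 * once at object-creation time, then only ever (re)scheduled. */
	struct conn_obj {
		struct delayed_work connect_work;
		struct delayed_work pending_work;
	};

	static void connect_timeout_fn(struct work_struct *w) { }
	static void pending_fn(struct work_struct *w) { }

	static void conn_obj_init(struct conn_obj *c)
	{
		INIT_DELAYED_WORK(&c->connect_work, connect_timeout_fn);
		INIT_DELAYED_WORK(&c->pending_work, pending_fn);
	}

	/* Later: schedule_delayed_work(&c->connect_work, timeout); never re-INIT
	 * a work item that may still be queued. */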