Commit df28e869 authored by David S. Miller

Merge branch 'net-better-const'

Eric Dumazet says:

====================
net: better const qualifier awareness

This is a follow-up to d27d367d ("inet: better const qualifier awareness")

Adopting container_of_const() for the (struct sock *) -> (protocol sock *)
conversion allows us to propagate the const qualifier and thus detect
misuses at compile time.

Most conversions are trivial, because most protocols have not yet adopted
const sk pointers where they could make sense.

Only the mptcp and tcp patches (at the end of this series) require small
adjustments.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 39a86d05 e9d9da91
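
For context, container_of_const() resolves to a const-qualified pointer when its argument is const, and a plain pointer otherwise. A minimal, self-contained userspace sketch of the idea follows (simplified stand-in types, not the real kernel definitions; the kernel's macro lives in include/linux/container_of.h):

#include <stddef.h>

struct sock { int sk_state; };

struct tcp_sock {
	struct sock sk;			/* embedded base socket */
	unsigned int total_retrans;
};

/* Simplified container_of(); the kernel version also type-checks. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* The const-aware variant: _Generic picks the first branch when the
 * argument is a pointer-to-const, so the const qualifier propagates
 * from the sock pointer to the derived protocol-sock pointer.
 */
#define container_of_const(ptr, type, member)				\
	_Generic(ptr,							\
		const typeof(*(ptr)) *:					\
			((const type *)((const char *)(ptr) -		\
					offsetof(type, member))),	\
		default: container_of(ptr, type, member))

#define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, sk)

static unsigned int read_retrans(const struct sock *sk)
{
	/* const in, const out: reads compile, writes do not. */
	return tcp_sk(sk)->total_retrans;
	/* tcp_sk(sk)->total_retrans++;  <-- would now fail to compile */
}

Before this series, the cast-based helpers silently discarded the qualifier, so a write through a const sock pointer compiled without complaint.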
@@ -305,10 +305,8 @@ struct dccp_sock {
 	struct timer_list		dccps_xmit_timer;
 };
 
-static inline struct dccp_sock *dccp_sk(const struct sock *sk)
-{
-	return (struct dccp_sock *)sk;
-}
+#define dccp_sk(ptr) container_of_const(ptr, struct dccp_sock, \
+					dccps_inet_connection.icsk_inet.sk)
 
 static inline const char *dccp_role(const struct sock *sk)
 {
...
@@ -336,10 +336,7 @@ static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
 	return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL;
 }
 
-static inline struct raw6_sock *raw6_sk(const struct sock *sk)
-{
-	return (struct raw6_sock *)sk;
-}
+#define raw6_sk(ptr) container_of_const(ptr, struct raw6_sock, inet.sk)
 
 #define ipv6_only_sock(sk)	(sk->sk_ipv6only)
 #define ipv6_sk_rxinfo(sk)	((sk)->sk_family == PF_INET6 && \
...
@@ -472,10 +472,12 @@ enum tsq_flags {
 	TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED),
 };
 
-static inline struct tcp_sock *tcp_sk(const struct sock *sk)
-{
-	return (struct tcp_sock *)sk;
-}
+#define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
+
+/* Variant of tcp_sk() upgrading a const sock to a read/write tcp socket.
+ * Used in context of (lockless) tcp listeners.
+ */
+#define tcp_sk_rw(ptr) container_of(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
 
 struct tcp_timewait_sock {
 	struct inet_timewait_sock tw_sk;
...
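
To make the intent of the new pair concrete, here is a hypothetical sketch reusing the simplified types and macros from the sketch above (not code from the patch): tcp_sk() gives the const-preserving view, while tcp_sk_rw() is the explicit, greppable opt-out used by lockless listeners.

#define tcp_sk_rw(ptr) container_of(ptr, struct tcp_sock, sk)

/* Listener-style code: sk is const because listeners are shared and
 * mostly read-only, but this one counter may be updated locklessly.
 */
static void note_retrans(const struct sock *sk)
{
	unsigned int seen = tcp_sk(sk)->total_retrans;	/* read: const view */

	if (seen == 0)
		tcp_sk_rw(sk)->total_retrans = 1;	/* write: explicit opt-out */
}

Keeping the writable variant a separate name, rather than an ad hoc cast at the call site, makes every const override searchable and self-documenting.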
@@ -97,10 +97,7 @@ struct udp_sock {
 
 #define UDP_MAX_SEGMENTS	(1 << 6UL)
 
-static inline struct udp_sock *udp_sk(const struct sock *sk)
-{
-	return (struct udp_sock *)sk;
-}
+#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
 
 static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
 {
...
@@ -74,10 +74,7 @@ struct unix_sock {
 #endif
 };
 
-static inline struct unix_sock *unix_sk(const struct sock *sk)
-{
-	return (struct unix_sock *)sk;
-}
+#define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk)
 
 #define peer_wait peer_wq.wait
...
@@ -260,10 +260,7 @@ struct ax25_sock {
 	struct ax25_cb		*cb;
 };
 
-static inline struct ax25_sock *ax25_sk(const struct sock *sk)
-{
-	return (struct ax25_sock *) sk;
-}
+#define ax25_sk(ptr) container_of_const(ptr, struct ax25_sock, sk)
 
 static inline struct ax25_cb *sk_to_ax25(const struct sock *sk)
 {
...
@@ -83,10 +83,7 @@ struct raw_sock {
 	u32			ipmr_table;
 };
 
-static inline struct raw_sock *raw_sk(const struct sock *sk)
-{
-	return (struct raw_sock *)sk;
-}
+#define raw_sk(ptr) container_of_const(ptr, struct raw_sock, inet.sk)
 
 static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if,
 				       int dif, int sdif)
...
@@ -529,7 +529,7 @@ static inline void tcp_synq_overflow(const struct sock *sk)
 	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
 	if (!time_between32(now, last_overflow, last_overflow + HZ))
-		WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
+		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
 }
 
 /* syncookies: no recent synqueue overflow on this listening socket? */
...
@@ -177,10 +177,7 @@ struct x25_forward {
	atomic_t		refcnt;
 };
 
-static inline struct x25_sock *x25_sk(const struct sock *sk)
-{
-	return (struct x25_sock *)sk;
-}
+#define x25_sk(ptr) container_of_const(ptr, struct x25_sock, sk)
 
 /* af_x25.c */
 extern int sysctl_x25_restart_request_timeout;
...
@@ -4570,7 +4570,7 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
 	const __u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
 	const struct tcphdr *th = tcp_hdr(skb);
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	int genhash, l3index;
 	u8 newhash[16];
...
@@ -458,7 +458,7 @@ static void tcp_sndbuf_expand(struct sock *sk)
 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb,
 			     unsigned int skbtruesize)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	/* Optimize this! */
 	int truesize = tcp_win_from_space(sk, skbtruesize) >> 1;
 	int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1;
@@ -5693,7 +5693,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
  */
 static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) &&
 			(1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK |
...
@@ -463,7 +463,7 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
 }
 EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
 
-static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
+static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
 				    struct request_sock *req,
 				    struct tcp_sock *newtp)
 {
@@ -492,7 +492,8 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct tcp_request_sock *treq = tcp_rsk(req);
 	struct inet_connection_sock *newicsk;
-	struct tcp_sock *oldtp, *newtp;
+	const struct tcp_sock *oldtp;
+	struct tcp_sock *newtp;
 	u32 seq;
 
 	if (!newsk)
...
@@ -4127,8 +4127,13 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
 	if (!res) {
 		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-		if (unlikely(tcp_passive_fastopen(sk)))
-			tcp_sk(sk)->total_retrans++;
+		if (unlikely(tcp_passive_fastopen(sk))) {
+			/* sk has const attribute because listeners are lockless.
+			 * However in this case, we are dealing with a passive fastopen
+			 * socket thus we can change total_retrans value.
+			 */
+			tcp_sk_rw(sk)->total_retrans++;
+		}
 		trace_tcp_retransmit_synack(sk, req);
 	}
 	return res;
...
@@ -4,7 +4,7 @@
 
 static u32 tcp_rack_reo_wnd(const struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	if (!tp->reord_seen) {
 		/* If reordering has not been observed, be aggressive during
...
@@ -459,7 +459,7 @@ static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
 	return false;
 }
 
-static void mptcp_set_datafin_timeout(const struct sock *sk)
+static void mptcp_set_datafin_timeout(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 retransmits;
...
@@ -334,10 +334,7 @@ static inline void msk_owned_by_me(const struct mptcp_sock *msk)
 	sock_owned_by_me((const struct sock *)msk);
 }
 
-static inline struct mptcp_sock *mptcp_sk(const struct sock *sk)
-{
-	return (struct mptcp_sock *)sk;
-}
+#define mptcp_sk(ptr) container_of_const(ptr, struct mptcp_sock, sk.icsk_inet.sk)
 
 /* the msk socket don't use the backlog, also account for the bulk
  * free memory
@@ -371,7 +368,7 @@ static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk)
 static inline struct mptcp_data_frag *mptcp_pending_tail(const struct sock *sk)
 {
-	struct mptcp_sock *msk = mptcp_sk(sk);
+	const struct mptcp_sock *msk = mptcp_sk(sk);
 
 	if (!msk->first_pending)
 		return NULL;
@@ -382,7 +379,7 @@ static inline struct mptcp_data_frag *mptcp_pending_tail(const struct sock *sk)
 	return list_last_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
 }
 
-static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
+static inline struct mptcp_data_frag *mptcp_rtx_head(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
...
@@ -133,10 +133,7 @@ struct packet_sock {
 	atomic_t		tp_drops ____cacheline_aligned_in_smp;
 };
 
-static inline struct packet_sock *pkt_sk(struct sock *sk)
-{
-	return (struct packet_sock *)sk;
-}
+#define pkt_sk(ptr) container_of_const(ptr, struct packet_sock, sk)
 
 enum packet_sock_flags {
 	PACKET_SOCK_ORIGDEV,
...
@@ -283,10 +283,7 @@ struct smc_sock {				/* smc sock container */
 						 * */
 };
 
-static inline struct smc_sock *smc_sk(const struct sock *sk)
-{
-	return (struct smc_sock *)sk;
-}
+#define smc_sk(ptr) container_of_const(ptr, struct smc_sock, sk)
 
 static inline void smc_init_saved_callbacks(struct smc_sock *smc)
 {
...
@@ -310,7 +310,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
 	case LSM_AUDIT_DATA_NET:
 		if (a->u.net->sk) {
 			const struct sock *sk = a->u.net->sk;
-			struct unix_sock *u;
+			const struct unix_sock *u;
 			struct unix_address *addr;
 			int len = 0;
 			char *p = NULL;
...