Commit a2a385d6 authored by Eric Dumazet, committed by David S. Miller

tcp: bool conversions

bool conversions where possible.

__inline__ -> inline

space cleanups
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e005d193
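
The whole patch applies one mechanical pattern: predicate-style helpers that used to return int 0/1 now return bool and say true/false, which documents intent and lets the compiler and sparse flag suspicious callers, typically with no change to the generated code. A minimal standalone sketch of the before/after shape (the function name is invented for illustration; the kernel gets bool from <linux/types.h>, userspace from <stdbool.h>):

#include <stdbool.h>

/* Before: predicate returning int 0/1. */
static inline int seq_is_valid_old(unsigned int seq)
{
        return seq != 0;
}

/* After: identical logic; the bool return type states that this is
 * a predicate and that only true/false are meaningful results.
 */
static inline bool seq_is_valid_new(unsigned int seq)
{
        return seq != 0;
}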
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -263,14 +263,14 @@ extern int tcp_memory_pressure;
  * and worry about wraparound (automatic with unsigned arithmetic).
  */
 
-static inline int before(__u32 seq1, __u32 seq2)
+static inline bool before(__u32 seq1, __u32 seq2)
 {
         return (__s32)(seq1-seq2) < 0;
 }
 #define after(seq2, seq1)       before(seq1, seq2)
 
 /* is s2<=s1<=s3 ? */
-static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
+static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 {
         return seq3 - seq2 >= seq1 - seq2;
 }
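
For context on why before() survives wraparound: the unsigned subtraction seq1 - seq2 is taken mod 2^32, and reinterpreting the result as signed __s32 asks whether seq1 lies in the 2^31-wide half-space behind seq2. A standalone demonstration, with plain stdint types standing in for the kernel's __u32/__s32:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static inline bool before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
        /* Ordinary case: 100 comes before 200. */
        assert(before(100, 200));

        /* Wraparound case: 0xFFFFFFF0 - 0x10 == 0xFFFFFFE0, i.e. -32 as
         * int32_t, so a sequence number just below the 2^32 wrap point
         * is correctly ordered before one just past it.
         */
        assert(before(0xFFFFFFF0u, 0x10u));
        assert(!before(0x10u, 0xFFFFFFF0u));
        return 0;
}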
@@ -305,7 +305,7 @@ static inline void tcp_synq_overflow(struct sock *sk)
 }
 
 /* syncookies: no recent synqueue overflow on this listening socket? */
-static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 {
         unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
         return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
@@ -383,7 +383,7 @@ extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
                                            struct request_sock **prev);
 extern int tcp_child_process(struct sock *parent, struct sock *child,
                              struct sk_buff *skb);
-extern int tcp_use_frto(struct sock *sk);
+extern bool tcp_use_frto(struct sock *sk);
 extern void tcp_enter_frto(struct sock *sk);
 extern void tcp_enter_loss(struct sock *sk, int how);
 extern void tcp_clear_retrans(struct tcp_sock *tp);
@@ -470,7 +470,7 @@ static inline __u32 cookie_v6_init_sequence(struct sock *sk,
 extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
                                       int nonagle);
-extern int tcp_may_send_now(struct sock *sk);
+extern bool tcp_may_send_now(struct sock *sk);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_retransmit_timer(struct sock *sk);
 extern void tcp_xmit_retransmit_queue(struct sock *);
@@ -484,9 +484,9 @@ extern int tcp_write_wakeup(struct sock *);
 extern void tcp_send_fin(struct sock *sk);
 extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 extern int tcp_send_synack(struct sock *);
-extern int tcp_syn_flood_action(struct sock *sk,
-                                const struct sk_buff *skb,
-                                const char *proto);
+extern bool tcp_syn_flood_action(struct sock *sk,
+                                 const struct sk_buff *skb,
+                                 const char *proto);
 extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
 extern void tcp_send_delayed_ack(struct sock *sk);
@@ -794,12 +794,12 @@ static inline int tcp_is_sack(const struct tcp_sock *tp)
         return tp->rx_opt.sack_ok;
 }
 
-static inline int tcp_is_reno(const struct tcp_sock *tp)
+static inline bool tcp_is_reno(const struct tcp_sock *tp)
 {
         return !tcp_is_sack(tp);
 }
 
-static inline int tcp_is_fack(const struct tcp_sock *tp)
+static inline bool tcp_is_fack(const struct tcp_sock *tp)
 {
         return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
 }
@@ -901,7 +901,7 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 {
         return tp->snd_una + tp->snd_wnd;
 }
-extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
+extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
 
 static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
                                        const struct sk_buff *skb)
@@ -944,7 +944,7 @@ static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
         return __skb_checksum_complete(skb);
 }
 
-static inline int tcp_checksum_complete(struct sk_buff *skb)
+static inline bool tcp_checksum_complete(struct sk_buff *skb)
 {
         return !skb_csum_unnecessary(skb) &&
                 __tcp_checksum_complete(skb);
@@ -974,12 +974,12 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
  *
  * NOTE: is this not too big to inline?
  */
-static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 {
         struct tcp_sock *tp = tcp_sk(sk);
 
         if (sysctl_tcp_low_latency || !tp->ucopy.task)
-                return 0;
+                return false;
 
         __skb_queue_tail(&tp->ucopy.prequeue, skb);
         tp->ucopy.memory += skb->truesize;
@@ -1003,7 +1003,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
                                           (3 * tcp_rto_min(sk)) / 4,
                                           TCP_RTO_MAX);
         }
-        return 1;
+        return true;
 }
@@ -1108,28 +1108,28 @@ static inline int tcp_fin_time(const struct sock *sk)
         return fin_timeout;
 }
 
-static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
-                                 int paws_win)
+static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
+                                  int paws_win)
 {
         if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
-                return 1;
+                return true;
         if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
-                return 1;
+                return true;
         /*
          * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
          * then following tcp messages have valid values. Ignore 0 value,
          * or else 'negative' tsval might forbid us to accept their packets.
          */
         if (!rx_opt->ts_recent)
-                return 1;
-        return 0;
+                return true;
+        return false;
 }
 
-static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
-                                  int rst)
+static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
+                                   int rst)
 {
         if (tcp_paws_check(rx_opt, 0))
-                return 0;
+                return false;
 
         /* RST segments are not recommended to carry timestamp,
            and, if they do, it is recommended to ignore PAWS because
@@ -1144,8 +1144,8 @@ static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
            However, we can relax time bounds for RST segments to MSL.
          */
         if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
-                return 0;
-        return 1;
+                return false;
+        return true;
 }
 
 static inline void tcp_mib_init(struct net *net)
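
Restating the two PAWS helpers above: tcp_paws_check() accepts a segment when its timestamp has not moved backwards by more than paws_win, or when the remembered timestamp is more than 24 days old or was never set; tcp_paws_reject() then inverts that answer, with extra leniency for RSTs. A simplified standalone sketch of the first decision, with plain integers standing in for tcp_options_received and get_seconds() (names invented):

#include <stdbool.h>
#include <stdint.h>

#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)     /* seconds, as in the kernel */

static bool paws_check(uint32_t ts_recent, uint32_t rcv_tsval,
                       uint32_t ts_recent_stamp, uint32_t now_secs,
                       int paws_win)
{
        if ((int32_t)(ts_recent - rcv_tsval) <= paws_win)
                return true;    /* timestamp did not move backwards */
        if (now_secs >= ts_recent_stamp + TCP_PAWS_24DAYS)
                return true;    /* remembered timestamp is stale */
        if (!ts_recent)
                return true;    /* peer never sent a nonzero tsval */
        return false;
}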
@@ -1383,7 +1383,7 @@ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
         __skb_unlink(skb, &sk->sk_write_queue);
 }
 
-static inline int tcp_write_queue_empty(struct sock *sk)
+static inline bool tcp_write_queue_empty(struct sock *sk)
 {
         return skb_queue_empty(&sk->sk_write_queue);
 }
@@ -1440,7 +1440,7 @@ static inline void tcp_highest_sack_combine(struct sock *sk,
 /* Determines whether this is a thin stream (which may suffer from
  * increased latency). Used to trigger latency-reducing mechanisms.
  */
-static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
+static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
 {
         return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
 }
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -593,7 +593,7 @@ static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
         tp->pushed_seq = tp->write_seq;
 }
 
-static inline int forced_push(const struct tcp_sock *tp)
+static inline bool forced_push(const struct tcp_sock *tp)
 {
         return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
@@ -1082,7 +1082,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                         if (err)
                                 goto do_fault;
                 } else {
-                        int merge = 0;
+                        bool merge = false;
                         int i = skb_shinfo(skb)->nr_frags;
                         struct page *page = sk->sk_sndmsg_page;
                         int off;
@@ -1096,7 +1096,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                             off != PAGE_SIZE) {
                                 /* We can extend the last page
                                  * fragment. */
-                                merge = 1;
+                                merge = true;
                         } else if (i == MAX_SKB_FRAGS || !sg) {
                                 /* Need to add new fragment and cannot
                                  * do this because interface is non-SG,
@@ -1293,7 +1293,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
 void tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-        int time_to_ack = 0;
+        bool time_to_ack = false;
 
         struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
@@ -1319,7 +1319,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
                      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
                       !icsk->icsk_ack.pingpong)) &&
                       !atomic_read(&sk->sk_rmem_alloc)))
-                        time_to_ack = 1;
+                        time_to_ack = true;
         }
 
         /* We send an ACK if we can now advertise a non-zero window
@@ -1341,7 +1341,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
                          * "Lots" means "at least twice" here.
                          */
                         if (new_window && new_window >= 2 * rcv_window_now)
-                                time_to_ack = 1;
+                                time_to_ack = true;
                 }
         }
         if (time_to_ack)
@@ -2171,7 +2171,7 @@ EXPORT_SYMBOL(tcp_close);
 
 /* These states need RST on ABORT according to RFC793 */
 
-static inline int tcp_need_reset(int state)
+static inline bool tcp_need_reset(int state)
 {
         return (1 << state) &
                (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
@@ -2245,7 +2245,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
-static inline int tcp_can_repair_sock(struct sock *sk)
+static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
         return capable(CAP_NET_ADMIN) &&
                 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
@@ -3172,13 +3172,13 @@ __tcp_alloc_md5sig_pool(struct sock *sk)
 struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
 {
         struct tcp_md5sig_pool __percpu *pool;
-        int alloc = 0;
+        bool alloc = false;
 
 retry:
         spin_lock_bh(&tcp_md5sig_pool_lock);
         pool = tcp_md5sig_pool;
         if (tcp_md5sig_users++ == 0) {
-                alloc = 1;
+                alloc = true;
                 spin_unlock_bh(&tcp_md5sig_pool_lock);
         } else if (!pool) {
                 tcp_md5sig_users--;
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -280,19 +280,19 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
 /* RFC2861 Check whether we are limited by application or congestion window
  * This is the inverse of cwnd check in tcp_tso_should_defer
  */
-int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 {
         const struct tcp_sock *tp = tcp_sk(sk);
         u32 left;
 
         if (in_flight >= tp->snd_cwnd)
-                return 1;
+                return true;
 
         left = tp->snd_cwnd - in_flight;
         if (sk_can_gso(sk) &&
             left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
             left * tp->mss_cache < sk->sk_gso_max_size)
-                return 1;
+                return true;
         return left <= tcp_max_tso_deferred_mss(tp);
 }
 EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
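
In words: the flow is cwnd-limited when everything the window allows is already in flight; otherwise, for GSO sockets, it still counts as cwnd-limited when the remaining room (left) is less than a sysctl_tcp_tso_win_divisor-th of the window and too small for a full gso_max_size burst, or no bigger than what tcp_tso_should_defer() would hold back anyway. A standalone restatement with the socket state passed in explicitly (parameter names invented):

#include <stdbool.h>
#include <stdint.h>

static bool is_cwnd_limited(uint32_t snd_cwnd, uint32_t in_flight,
                            bool can_gso, uint32_t tso_win_divisor,
                            uint32_t mss_cache, uint32_t gso_max_size,
                            uint32_t max_tso_deferred_mss)
{
        uint32_t left;

        if (in_flight >= snd_cwnd)
                return true;            /* window fully used */

        left = snd_cwnd - in_flight;
        if (can_gso &&
            left * tso_win_divisor < snd_cwnd &&
            left * mss_cache < gso_max_size)
                return true;            /* room too small for a TSO burst */

        return left <= max_tso_deferred_mss;
}

For example, with snd_cwnd = 100, in_flight = 90, divisor = 3, mss_cache = 1448 and gso_max_size = 65536: left = 10, 10 * 3 < 100 and 10 * 1448 < 65536, so the flow is still treated as cwnd-limited.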
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -15,7 +15,7 @@
 
 /* Tcp Hybla structure. */
 struct hybla {
-        u8    hybla_en;
+        bool  hybla_en;
         u32   snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */
         u32   rho;            /* Rho parameter, integer part */
         u32   rho2;           /* Rho * Rho, integer part */
@@ -24,8 +24,7 @@ struct hybla {
         u32   minrtt;         /* Minimum smoothed round trip time value seen */
 };
 
-/* Hybla reference round trip time (default= 1/40 sec = 25 ms),
-   expressed in jiffies */
+/* Hybla reference round trip time (default= 1/40 sec = 25 ms), in ms */
 static int rtt0 = 25;
 module_param(rtt0, int, 0644);
 MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)");
@@ -39,7 +38,7 @@ static inline void hybla_recalc_param (struct sock *sk)
         ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
         ca->rho = ca->rho_3ls >> 3;
         ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
-        ca->rho2 = ca->rho2_7ls >>7;
+        ca->rho2 = ca->rho2_7ls >> 7;
 }
 
 static void hybla_init(struct sock *sk)
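
hybla_recalc_param() works in fixed point: tp->srtt is stored left-shifted by 3 (8x the smoothed RTT in jiffies), so dividing by rtt0 in jiffies yields rho_3ls = 8*rho directly, clamped below at 8 to keep rho >= 1; squaring and shifting left once gives rho2_7ls = 128*rho^2. A worked standalone example (assuming HZ = 1000, so one jiffy is 1 ms):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t srtt = 8 * 100;        /* kernel units for a 100 ms RTT */
        uint32_t rtt0_jiffies = 25;     /* msecs_to_jiffies(25) at HZ=1000 */

        uint32_t rho_3ls = srtt / rtt0_jiffies;         /* 32 == 8 * rho */
        if (rho_3ls < 8)
                rho_3ls = 8;                            /* clamp: rho >= 1 */
        uint32_t rho = rho_3ls >> 3;                    /* 4 */
        uint32_t rho2_7ls = (rho_3ls * rho_3ls) << 1;   /* 2048 == 128 * rho^2 */
        uint32_t rho2 = rho2_7ls >> 7;                  /* 16 == rho^2 */

        printf("rho=%u rho2=%u\n", rho, rho2);          /* prints rho=4 rho2=16 */
        return 0;
}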
@@ -52,7 +51,7 @@ static void hybla_init(struct sock *sk)
         ca->rho_3ls = 0;
         ca->rho2_7ls = 0;
         ca->snd_cwnd_cents = 0;
-        ca->hybla_en = 1;
+        ca->hybla_en = true;
         tp->snd_cwnd = 2;
         tp->snd_cwnd_clamp = 65535;
@@ -67,6 +66,7 @@ static void hybla_init(struct sock *sk)
 static void hybla_state(struct sock *sk, u8 ca_state)
 {
         struct hybla *ca = inet_csk_ca(sk);
+
         ca->hybla_en = (ca_state == TCP_CA_Open);
 }
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
(This file's diff is collapsed in the original web view.)
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -866,14 +866,14 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 }
 
 /*
- * Return 1 if a syncookie should be sent
+ * Return true if a syncookie should be sent
 */
-int tcp_syn_flood_action(struct sock *sk,
+bool tcp_syn_flood_action(struct sock *sk,
                          const struct sk_buff *skb,
                          const char *proto)
 {
         const char *msg = "Dropping request";
-        int want_cookie = 0;
+        bool want_cookie = false;
         struct listen_sock *lopt;
@@ -881,7 +881,7 @@ int tcp_syn_flood_action(struct sock *sk,
 #ifdef CONFIG_SYN_COOKIES
         if (sysctl_tcp_syncookies) {
                 msg = "Sending cookies";
-                want_cookie = 1;
+                want_cookie = true;
                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
         } else
 #endif
@@ -1196,7 +1196,7 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
-static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 {
         /*
          * This gets called for each TCP segment that arrives
@@ -1219,16 +1219,16 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 
         /* We've parsed the options - do we have a hash? */
         if (!hash_expected && !hash_location)
-                return 0;
+                return false;
 
         if (hash_expected && !hash_location) {
                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
-                return 1;
+                return true;
         }
 
         if (!hash_expected && hash_location) {
                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
-                return 1;
+                return true;
         }
 
         /* Okay, so this is hash_expected and hash_location -
@@ -1244,9 +1244,9 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
                                      &iph->daddr, ntohs(th->dest),
                                      genhash ? " tcp_v4_calc_md5_hash failed"
                                      : "");
-                return 1;
+                return true;
         }
-        return 0;
+        return false;
 }
 
 #endif
@@ -1280,7 +1280,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         __be32 saddr = ip_hdr(skb)->saddr;
         __be32 daddr = ip_hdr(skb)->daddr;
         __u32 isn = TCP_SKB_CB(skb)->when;
-        int want_cookie = 0;
+        bool want_cookie = false;
 
         /* Never answer to SYNs send to broadcast or multicast */
         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1339,7 +1339,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                         while (l-- > 0)
                                 *c++ ^= *hash_location++;
 
-                        want_cookie = 0;        /* not our kind of cookie */
+                        want_cookie = false;    /* not our kind of cookie */
                         tmp_ext.cookie_out_never = 0; /* false */
                         tmp_ext.cookie_plus = tmp_opt.cookie_plus;
                 } else if (!tp->rx_opt.cookie_in_always) {
@@ -2073,7 +2073,7 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
         return rc;
 }
 
-static inline int empty_bucket(struct tcp_iter_state *st)
+static inline bool empty_bucket(struct tcp_iter_state *st)
 {
         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
                 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(tcp_death_row);
  * state.
  */
 
-static int tcp_remember_stamp(struct sock *sk)
+static bool tcp_remember_stamp(struct sock *sk)
 {
         const struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
@@ -72,13 +72,13 @@ static int tcp_remember_stamp(struct sock *sk)
                 }
                 if (release_it)
                         inet_putpeer(peer);
-                return 1;
+                return true;
         }
 
-        return 0;
+        return false;
 }
 
-static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
+static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
 {
         struct sock *sk = (struct sock *) tw;
         struct inet_peer *peer;
@@ -94,17 +94,17 @@ static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
                         peer->tcp_ts = tcptw->tw_ts_recent;
                 }
                 inet_putpeer(peer);
-                return 1;
+                return true;
         }
 
-        return 0;
+        return false;
 }
 
-static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
+static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 {
         if (seq == s_win)
-                return 1;
+                return true;
         if (after(end_seq, s_win) && before(seq, e_win))
-                return 1;
+                return true;
         return seq == e_win && seq == end_seq;
 }
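
tcp_in_window() asks whether the segment [seq, end_seq] overlaps the receive window [s_win, e_win], with two edge cases: a segment starting exactly at the left edge always counts, and a zero-length segment sitting exactly at the right edge counts too. A self-contained copy using plain stdint types, with before()/after() reproduced from tcp.h above:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static inline bool before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1)       before(seq1, seq2)

static bool tcp_in_window(uint32_t seq, uint32_t end_seq,
                          uint32_t s_win, uint32_t e_win)
{
        if (seq == s_win)
                return true;
        if (after(end_seq, s_win) && before(seq, e_win))
                return true;
        return seq == e_win && seq == end_seq;
}

int main(void)
{
        /* Window [1000, 2000]: a segment [1500, 1600] overlaps. */
        assert(tcp_in_window(1500, 1600, 1000, 2000));
        /* Zero-length segment exactly at the right edge still counts. */
        assert(tcp_in_window(2000, 2000, 1000, 2000));
        /* A segment entirely beyond the window does not. */
        assert(!tcp_in_window(2001, 2101, 1000, 2000));
        return 0;
}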
@@ -143,7 +143,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
         struct tcp_options_received tmp_opt;
         const u8 *hash_location;
         struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
-        int paws_reject = 0;
+        bool paws_reject = false;
 
         tmp_opt.saw_tstamp = 0;
         if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
@@ -316,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
         struct inet_timewait_sock *tw = NULL;
         const struct inet_connection_sock *icsk = inet_csk(sk);
         const struct tcp_sock *tp = tcp_sk(sk);
-        int recycle_ok = 0;
+        bool recycle_ok = false;
 
         if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
                 recycle_ok = tcp_remember_stamp(sk);
@@ -575,7 +575,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
         struct sock *child;
         const struct tcphdr *th = tcp_hdr(skb);
         __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
-        int paws_reject = 0;
+        bool paws_reject = false;
 
         tmp_opt.saw_tstamp = 0;
         if (th->doff > (sizeof(struct tcphdr)>>2)) {
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -370,7 +370,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
         TCP_SKB_CB(skb)->end_seq = seq;
 }
 
-static inline int tcp_urg_mode(const struct tcp_sock *tp)
+static inline bool tcp_urg_mode(const struct tcp_sock *tp)
 {
         return tp->snd_una != tp->snd_up;
 }
@@ -1391,20 +1391,20 @@ static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
 }
 
 /* Minshall's variant of the Nagle send check. */
-static inline int tcp_minshall_check(const struct tcp_sock *tp)
+static inline bool tcp_minshall_check(const struct tcp_sock *tp)
 {
         return after(tp->snd_sml, tp->snd_una) &&
                 !after(tp->snd_sml, tp->snd_nxt);
 }
 
-/* Return 0, if packet can be sent now without violation Nagle's rules:
+/* Return false, if packet can be sent now without violation Nagle's rules:
  * 1. It is full sized.
  * 2. Or it contains FIN. (already checked by caller)
  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
  *    With Minshall's modification: all sent small packets are ACKed.
  */
-static inline int tcp_nagle_check(const struct tcp_sock *tp,
+static inline bool tcp_nagle_check(const struct tcp_sock *tp,
                                   const struct sk_buff *skb,
                                   unsigned int mss_now, int nonagle)
 {
@@ -1413,11 +1413,11 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
                  (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
 }
 
-/* Return non-zero if the Nagle test allows this packet to be
+/* Return true if the Nagle test allows this packet to be
  * sent now.
  */
-static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
-                                 unsigned int cur_mss, int nonagle)
+static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
+                                  unsigned int cur_mss, int nonagle)
 {
@@ -1426,24 +1426,25 @@ static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff
          * argument based upon the location of SKB in the send queue.
          */
         if (nonagle & TCP_NAGLE_PUSH)
-                return 1;
+                return true;
 
         /* Don't use the nagle rule for urgent data (or for the final FIN).
          * Nagle can be ignored during F-RTO too (see RFC4138).
          */
         if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
             (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
-                return 1;
+                return true;
 
         if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
-                return 1;
+                return true;
 
-        return 0;
+        return false;
 }
 
 /* Does at least the first segment of SKB fit into the send window? */
-static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
-                                   unsigned int cur_mss)
+static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
+                             const struct sk_buff *skb,
+                             unsigned int cur_mss)
 {
         u32 end_seq = TCP_SKB_CB(skb)->end_seq;
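
Putting the two Nagle hunks together: tcp_nagle_test() sends immediately for pushed frames, urgent data, the final FIN, or during F-RTO, and otherwise defers exactly when tcp_nagle_check() says the packet must wait: it is smaller than the MSS, and either TCP_CORK is set or classic Nagle (with Minshall's refinement) still has unacknowledged small packets outstanding. A boolean restatement under those assumptions; the first condition (skb->len < mss_now) is cut off above the hunk, so this is a sketch with invented parameter names, not a quote:

#include <stdbool.h>

/* Invented stand-ins for the tcp_sock/skb state used above. */
static bool nagle_must_wait(bool smaller_than_mss, bool corked,
                            bool nagle_off, bool small_pkts_unacked)
{
        return smaller_than_mss &&
               (corked || (!nagle_off && small_pkts_unacked));
}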
@@ -1476,7 +1477,7 @@ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
 }
 
 /* Test if sending is allowed right now. */
-int tcp_may_send_now(struct sock *sk)
+bool tcp_may_send_now(struct sock *sk)
 {
         const struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *skb = tcp_send_head(sk);
@@ -1546,7 +1547,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
  *
  * This algorithm is from John Heffner.
  */
-static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1606,11 +1607,11 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
         /* Ok, it looks like it is advisable to defer. */
         tp->tso_deferred = 1 | (jiffies << 1);
 
-        return 1;
+        return true;
 
 send_now:
         tp->tso_deferred = 0;
-        return 0;
+        return false;
 }
 
 /* Create a new MTU probe if we are ready.
@@ -1752,11 +1753,11 @@ static int tcp_mtu_probe(struct sock *sk)
  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
  * account rare use of URG, this is not a big flaw.
  *
- * Returns 1, if no segments are in flight and we have queued segments, but
- * cannot send anything now because of SWS or another problem.
+ * Returns true, if no segments are in flight and we have queued segments,
+ * but cannot send anything now because of SWS or another problem.
  */
-static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
-                          int push_one, gfp_t gfp)
+static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+                           int push_one, gfp_t gfp)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *skb;
@@ -1770,7 +1771,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                 /* Do MTU probing. */
                 result = tcp_mtu_probe(sk);
                 if (!result) {
-                        return 0;
+                        return false;
                 } else if (result > 0) {
                         sent_pkts = 1;
                 }
@@ -1829,7 +1830,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 
         if (likely(sent_pkts)) {
                 tcp_cwnd_validate(sk);
-                return 0;
+                return false;
         }
         return !tp->packets_out && tcp_send_head(sk);
 }
@@ -2028,22 +2029,22 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 }
 
 /* Check if coalescing SKBs is legal. */
-static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
+static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
 {
         if (tcp_skb_pcount(skb) > 1)
-                return 0;
+                return false;
         /* TODO: SACK collapsing could be used to remove this condition */
         if (skb_shinfo(skb)->nr_frags != 0)
-                return 0;
+                return false;
         if (skb_cloned(skb))
-                return 0;
+                return false;
         if (skb == tcp_send_head(sk))
-                return 0;
+                return false;
         /* Some heurestics for collapsing over SACK'd could be invented */
         if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
-                return 0;
+                return false;
 
-        return 1;
+        return true;
 }
 
 /* Collapse packets in the retransmit queue to make to create
@@ -2054,7 +2055,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *skb = to, *tmp;
-        int first = 1;
+        bool first = true;
 
         if (!sysctl_tcp_retrans_collapse)
                 return;
@@ -2068,7 +2069,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
                 space -= skb->len;
 
                 if (first) {
-                        first = 0;
+                        first = false;
                         continue;
                 }
@@ -2208,18 +2209,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 /* Check if we forward retransmits are possible in the current
  * window/congestion state.
  */
-static int tcp_can_forward_retransmit(struct sock *sk)
+static bool tcp_can_forward_retransmit(struct sock *sk)
 {
         const struct inet_connection_sock *icsk = inet_csk(sk);
         const struct tcp_sock *tp = tcp_sk(sk);
 
         /* Forward retransmissions are possible only during Recovery. */
         if (icsk->icsk_ca_state != TCP_CA_Recovery)
-                return 0;
+                return false;
 
         /* No forward retransmissions in Reno are possible. */
         if (tcp_is_reno(tp))
-                return 0;
+                return false;
 
         /* Yeah, we have to make difficult choice between forward transmission
          * and retransmission... Both ways have their merits...
@@ -2230,9 +2231,9 @@ static int tcp_can_forward_retransmit(struct sock *sk)
          */
         if (tcp_may_send_now(sk))
-                return 0;
+                return false;
 
-        return 1;
+        return true;
 }
 
 /* This gets called after a retransmit timeout, and the initially
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1055,7 +1055,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
         struct tcp_sock *tp = tcp_sk(sk);
         __u32 isn = TCP_SKB_CB(skb)->when;
         struct dst_entry *dst = NULL;
-        int want_cookie = 0;
+        bool want_cookie = false;
 
         if (skb->protocol == htons(ETH_P_IP))
                 return tcp_v4_conn_request(sk, skb);
@@ -1116,7 +1116,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                         while (l-- > 0)
                                 *c++ ^= *hash_location++;
 
-                        want_cookie = 0;        /* not our kind of cookie */
+                        want_cookie = false;    /* not our kind of cookie */
                         tmp_ext.cookie_out_never = 0; /* false */
                         tmp_ext.cookie_plus = tmp_opt.cookie_plus;
                 } else if (!tp->rx_opt.cookie_in_always) {