Commit ceef59b5 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next

Florian Westphal says:

====================
The following set contains changes for your *net-next* tree:

- make conntrack ignore packets that are delayed (i.e. that contain
  data which has already been acked).  The current behaviour of flagging
  them as INVALID causes more harm than good; let them pass so the peer
  can send an immediate ACK for the most recent sequence number.
- make conntrack recognize when both peers have sent 'invalid' FINs:
  this helps clean out stale connections faster in those cases where
  conntrack is no longer in sync with the actual connection state.
- now that DECNET is gone, we don't need to reserve space for
  DECNET-related information.
- compact the common 'find a free port number for the new inbound
  connection' code into a helper, then cap the number of tries the new
  helper makes before it gives up.
- replace various instances of strlcpy with strscpy, from Wolfram Sang
  (a short sketch of the semantic difference follows the commit header
  below).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9f8f1933 adda60cc
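
A note on the strlcpy() conversions scattered through this diff: strlcpy() returns the length of the *source* string, so callers have to compare the return value against the buffer size to detect truncation, while strscpy() always NUL-terminates and reports truncation directly by returning -E2BIG (otherwise it returns the number of bytes copied). The snippet below is a minimal userspace model of that difference; toy_strscpy() is an illustrative stand-in, not the kernel implementation.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the kernel's strscpy(): copy at most
 * count-1 bytes, always NUL-terminate, and report truncation via
 * -E2BIG instead of returning the (possibly larger) source length
 * the way strlcpy() does.
 */
static long toy_strscpy(char *dest, const char *src, size_t count)
{
        size_t len;

        if (count == 0)
                return -E2BIG;

        len = strlen(src);
        if (len >= count) {
                memcpy(dest, src, count - 1);
                dest[count - 1] = '\0';
                return -E2BIG;          /* truncated */
        }
        memcpy(dest, src, len + 1);
        return (long)len;               /* bytes copied, excluding NUL */
}

int main(void)
{
        char buf[8];

        /* strlcpy(buf, "short", sizeof(buf)) would return 5; so does this. */
        printf("%ld\n", toy_strscpy(buf, "short", sizeof(buf)));

        /* strlcpy() would return 19 here (the source length), forcing the
         * caller to compare against sizeof(buf); strscpy() reports -E2BIG.
         */
        printf("%ld\n", toy_strscpy(buf, "much longer string!", sizeof(buf)));
        return 0;
}
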
......@@ -38,4 +38,5 @@ bool nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
* to port ct->master->saved_proto. */
void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this);
u16 nf_nat_exp_find_port(struct nf_conntrack_expect *exp, u16 port);
#endif
......@@ -63,7 +63,9 @@ enum {
NFPROTO_NETDEV = 5,
NFPROTO_BRIDGE = 7,
NFPROTO_IPV6 = 10,
#ifndef __KERNEL__ /* no longer supported by kernel */
NFPROTO_DECNET = 12,
#endif
NFPROTO_NUMPROTO,
};
......
......@@ -291,20 +291,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
exp->expectfn = nf_nat_follow_master;
exp->dir = !dir;
/* Try to get same port: if not, try to change it. */
for (; nated_port != 0; nated_port++) {
int ret;
exp->tuple.dst.u.tcp.port = htons(nated_port);
ret = nf_ct_expect_related(exp, 0);
if (ret == 0)
break;
else if (ret != -EBUSY) {
nated_port = 0;
break;
}
}
nated_port = nf_nat_exp_find_port(exp, nated_port);
if (nated_port == 0) { /* No port available */
net_notice_ratelimited("nf_nat_h323: out of TCP ports\n");
return 0;
......@@ -347,20 +334,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
if (info->sig_port[dir] == port)
nated_port = ntohs(info->sig_port[!dir]);
/* Try to get same port: if not, try to change it. */
for (; nated_port != 0; nated_port++) {
int ret;
exp->tuple.dst.u.tcp.port = htons(nated_port);
ret = nf_ct_expect_related(exp, 0);
if (ret == 0)
break;
else if (ret != -EBUSY) {
nated_port = 0;
break;
}
}
nated_port = nf_nat_exp_find_port(exp, nated_port);
if (nated_port == 0) { /* No port available */
net_notice_ratelimited("nf_nat_q931: out of TCP ports\n");
return 0;
......@@ -439,20 +413,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
if (info->sig_port[dir] == port)
nated_port = ntohs(info->sig_port[!dir]);
/* Try to get same port: if not, try to change it. */
for (; nated_port != 0; nated_port++) {
int ret;
exp->tuple.dst.u.tcp.port = htons(nated_port);
ret = nf_ct_expect_related(exp, 0);
if (ret == 0)
break;
else if (ret != -EBUSY) {
nated_port = 0;
break;
}
}
nated_port = nf_nat_exp_find_port(exp, nated_port);
if (nated_port == 0) { /* No port available */
net_notice_ratelimited("nf_nat_ras: out of TCP ports\n");
return 0;
......@@ -532,20 +493,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
exp->expectfn = ip_nat_callforwarding_expect;
exp->dir = !dir;
/* Try to get same port: if not, try to change it. */
for (nated_port = ntohs(port); nated_port != 0; nated_port++) {
int ret;
exp->tuple.dst.u.tcp.port = htons(nated_port);
ret = nf_ct_expect_related(exp, 0);
if (ret == 0)
break;
else if (ret != -EBUSY) {
nated_port = 0;
break;
}
}
nated_port = nf_nat_exp_find_port(exp, ntohs(port));
if (nated_port == 0) { /* No port available */
net_notice_ratelimited("nf_nat_q931: out of TCP ports\n");
return 0;
......
......@@ -353,7 +353,7 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
if (unlikely(!c))
return;
strlcpy(c->str, ext->comment, len + 1);
strscpy(c->str, ext->comment, len + 1);
set->ext_size += sizeof(*c) + strlen(c->str) + 1;
rcu_assign_pointer(comment->c, c);
}
......@@ -1072,7 +1072,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info,
if (!set)
return -ENOMEM;
spin_lock_init(&set->lock);
strlcpy(set->name, name, IPSET_MAXNAMELEN);
strscpy(set->name, name, IPSET_MAXNAMELEN);
set->family = family;
set->revision = revision;
......
......@@ -2611,7 +2611,7 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
dst->addr = src->addr.ip;
dst->port = src->port;
dst->fwmark = src->fwmark;
strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name));
strscpy(dst->sched_name, sched_name, sizeof(dst->sched_name));
dst->flags = src->flags;
dst->timeout = src->timeout / HZ;
dst->netmask = src->netmask;
......@@ -2805,13 +2805,13 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
mutex_lock(&ipvs->sync_mutex);
if (ipvs->sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
strlcpy(d[0].mcast_ifn, ipvs->mcfg.mcast_ifn,
strscpy(d[0].mcast_ifn, ipvs->mcfg.mcast_ifn,
sizeof(d[0].mcast_ifn));
d[0].syncid = ipvs->mcfg.syncid;
}
if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
strlcpy(d[1].mcast_ifn, ipvs->bcfg.mcast_ifn,
strscpy(d[1].mcast_ifn, ipvs->bcfg.mcast_ifn,
sizeof(d[1].mcast_ifn));
d[1].syncid = ipvs->bcfg.syncid;
}
......@@ -3561,7 +3561,7 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
return -EINVAL;
strlcpy(c.mcast_ifn, nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
strscpy(c.mcast_ifn, nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
sizeof(c.mcast_ifn));
c.syncid = nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]);
......
......@@ -47,6 +47,12 @@ static const char *const tcp_conntrack_names[] = {
"SYN_SENT2",
};
enum nf_ct_tcp_action {
NFCT_TCP_IGNORE,
NFCT_TCP_INVALID,
NFCT_TCP_ACCEPT,
};
#define SECS * HZ
#define MINS * 60 SECS
#define HOURS * 60 MINS
......@@ -472,23 +478,45 @@ static void tcp_init_sender(struct ip_ct_tcp_state *sender,
}
}
static bool tcp_in_window(struct nf_conn *ct,
enum ip_conntrack_dir dir,
unsigned int index,
const struct sk_buff *skb,
unsigned int dataoff,
const struct tcphdr *tcph,
__printf(6, 7)
static enum nf_ct_tcp_action nf_tcp_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
const struct nf_hook_state *state,
const struct ip_ct_tcp_state *sender,
enum nf_ct_tcp_action ret,
const char *fmt, ...)
{
const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));
struct va_format vaf;
va_list args;
bool be_liberal;
be_liberal = sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || tn->tcp_be_liberal;
if (be_liberal)
return NFCT_TCP_ACCEPT;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
nf_ct_l4proto_log_invalid(skb, ct, state, "%pV", &vaf);
va_end(args);
return ret;
}
static enum nf_ct_tcp_action
tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
unsigned int index, const struct sk_buff *skb,
unsigned int dataoff, const struct tcphdr *tcph,
const struct nf_hook_state *hook_state)
{
struct ip_ct_tcp *state = &ct->proto.tcp;
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct ip_ct_tcp_state *sender = &state->seen[dir];
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
__u32 seq, ack, sack, end, win, swin;
u16 win_raw;
bool in_recv_win, seq_ok;
s32 receiver_offset;
bool res, in_recv_win;
u16 win_raw;
/*
* Get the required data from the packet.
......@@ -517,7 +545,7 @@ static bool tcp_in_window(struct nf_conn *ct,
end, win);
if (!tcph->ack)
/* Simultaneous open */
return true;
return NFCT_TCP_ACCEPT;
} else {
/*
* We are in the middle of a connection,
......@@ -560,7 +588,7 @@ static bool tcp_in_window(struct nf_conn *ct,
end, win);
if (dir == IP_CT_DIR_REPLY && !tcph->ack)
return true;
return NFCT_TCP_ACCEPT;
}
if (!(tcph->ack)) {
......@@ -584,23 +612,63 @@ static bool tcp_in_window(struct nf_conn *ct,
*/
seq = end = sender->td_end;
/* Is the ending sequence in the receive window (if available)? */
in_recv_win = !receiver->td_maxwin ||
seq_ok = before(seq, sender->td_maxend + 1);
if (!seq_ok) {
u32 overshot = end - sender->td_maxend + 1;
bool ack_ok;
ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1);
in_recv_win = receiver->td_maxwin &&
after(end, sender->td_end - receiver->td_maxwin - 1);
if (before(seq, sender->td_maxend + 1) &&
in_recv_win &&
before(sack, receiver->td_end + 1) &&
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
/*
* Take into account window scaling (RFC 1323).
if (in_recv_win &&
ack_ok &&
overshot <= receiver->td_maxwin &&
before(sack, receiver->td_end + 1)) {
/* Work around TCPs that send more bytes than allowed by
* the receive window.
*
* If the (marked as invalid) packet is allowed to pass by
* the ruleset and the peer acks this data, then its possible
* all future packets will trigger 'ACK is over upper bound' check.
*
* Thus if only the sequence check fails then do update td_end so
* possible ACK for this data can update internal state.
*/
sender->td_end = end;
sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
"%u bytes more than expected", overshot);
}
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
"SEQ is over upper bound %u (over the window of the receiver)",
sender->td_maxend + 1);
}
if (!before(sack, receiver->td_end + 1))
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
"ACK is over upper bound %u (ACKed data not seen yet)",
receiver->td_end + 1);
/* Is the ending sequence in the receive window (if available)? */
in_recv_win = !receiver->td_maxwin ||
after(end, sender->td_end - receiver->td_maxwin - 1);
if (!in_recv_win)
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
"SEQ is under lower bound %u (already ACKed data retransmitted)",
sender->td_end - receiver->td_maxwin - 1);
if (!after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1))
return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
"ignored ACK under lower bound %u (possible overly delayed)",
receiver->td_end - MAXACKWINDOW(sender) - 1);
/* Take into account window scaling (RFC 1323). */
if (!tcph->syn)
win <<= sender->td_scale;
/*
* Update sender data.
*/
/* Update sender data. */
swin = win + (sack - ack);
if (sender->td_maxwin < swin)
sender->td_maxwin = swin;
......@@ -612,13 +680,12 @@ static bool tcp_in_window(struct nf_conn *ct,
if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
sender->td_maxack = ack;
sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
} else if (after(ack, sender->td_maxack))
} else if (after(ack, sender->td_maxack)) {
sender->td_maxack = ack;
}
}
/*
* Update receiver data.
*/
/* Update receiver data. */
if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
receiver->td_maxwin += end - sender->td_maxend;
if (after(sack + win, receiver->td_maxend - 1)) {
......@@ -629,17 +696,15 @@ static bool tcp_in_window(struct nf_conn *ct,
if (ack == receiver->td_end)
receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
/*
* Check retransmissions.
*/
/* Check retransmissions. */
if (index == TCP_ACK_SET) {
if (state->last_dir == dir
&& state->last_seq == seq
&& state->last_ack == ack
&& state->last_end == end
&& state->last_win == win_raw)
if (state->last_dir == dir &&
state->last_seq == seq &&
state->last_ack == ack &&
state->last_end == end &&
state->last_win == win_raw) {
state->retrans++;
else {
} else {
state->last_dir = dir;
state->last_seq = seq;
state->last_ack = ack;
......@@ -648,58 +713,65 @@ static bool tcp_in_window(struct nf_conn *ct,
state->retrans = 0;
}
}
res = true;
} else {
res = false;
if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
tn->tcp_be_liberal)
res = true;
if (!res) {
bool seq_ok = before(seq, sender->td_maxend + 1);
if (!seq_ok) {
u32 overshot = end - sender->td_maxend + 1;
bool ack_ok;
return NFCT_TCP_ACCEPT;
}
ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1);
static void __cold nf_tcp_handle_invalid(struct nf_conn *ct,
enum ip_conntrack_dir dir,
int index,
const struct sk_buff *skb,
const struct nf_hook_state *hook_state)
{
const unsigned int *timeouts;
const struct nf_tcp_net *tn;
unsigned int timeout;
u32 expires;
if (in_recv_win &&
ack_ok &&
overshot <= receiver->td_maxwin &&
before(sack, receiver->td_end + 1)) {
/* Work around TCPs that send more bytes than allowed by
* the receive window.
*
* If the (marked as invalid) packet is allowed to pass by
* the ruleset and the peer acks this data, then its possible
* all future packets will trigger 'ACK is over upper bound' check.
if (!test_bit(IPS_ASSURED_BIT, &ct->status) ||
test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
return;
/* We don't want to have connections hanging around in ESTABLISHED
* state for long time 'just because' conntrack deemed a FIN/RST
* out-of-window.
*
* Thus if only the sequence check fails then do update td_end so
* possible ACK for this data can update internal state.
* Shrink the timeout just like when there is unacked data.
* This speeds up eviction of 'dead' connections where the
* connection and conntracks internal state are out of sync.
*/
sender->td_end = end;
sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
nf_ct_l4proto_log_invalid(skb, ct, hook_state,
"%u bytes more than expected", overshot);
return res;
}
switch (index) {
case TCP_RST_SET:
case TCP_FIN_SET:
break;
default:
return;
}
if (ct->proto.tcp.last_dir != dir &&
(ct->proto.tcp.last_index == TCP_FIN_SET ||
ct->proto.tcp.last_index == TCP_RST_SET)) {
expires = nf_ct_expires(ct);
if (expires < 120 * HZ)
return;
tn = nf_tcp_pernet(nf_ct_net(ct));
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = tn->timeouts;
timeout = READ_ONCE(timeouts[TCP_CONNTRACK_UNACK]);
if (expires > timeout) {
nf_ct_l4proto_log_invalid(skb, ct, hook_state,
"%s",
before(seq, sender->td_maxend + 1) ?
in_recv_win ?
before(sack, receiver->td_end + 1) ?
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
: "ACK is under the lower bound (possible overly delayed ACK)"
: "ACK is over the upper bound (ACKed data not seen yet)"
: "SEQ is under the lower bound (already ACKed data retransmitted)"
: "SEQ is over the upper bound (over the window of the receiver)");
"packet (index %d, dir %d) response for index %d lower timeout to %u",
index, dir, ct->proto.tcp.last_index, timeout);
WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
}
} else {
ct->proto.tcp.last_index = index;
ct->proto.tcp.last_dir = dir;
}
return res;
}
/* table of valid flag combinations - PUSH, ECE and CWR are always valid */
......@@ -861,6 +933,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
struct nf_conntrack_tuple *tuple;
enum tcp_conntrack new_state, old_state;
unsigned int index, *timeouts;
enum nf_ct_tcp_action res;
enum ip_conntrack_dir dir;
const struct tcphdr *th;
struct tcphdr _tcph;
......@@ -1126,10 +1199,18 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
break;
}
if (!tcp_in_window(ct, dir, index,
skb, dataoff, th, state)) {
res = tcp_in_window(ct, dir, index,
skb, dataoff, th, state);
switch (res) {
case NFCT_TCP_IGNORE:
spin_unlock_bh(&ct->lock);
return NF_ACCEPT;
case NFCT_TCP_INVALID:
nf_tcp_handle_invalid(ct, dir, index, skb, state);
spin_unlock_bh(&ct->lock);
return -NF_ACCEPT;
case NFCT_TCP_ACCEPT:
break;
}
in_window:
/* From now on we have got in-window packets */
......
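
The window checks in the hunk above rely on wrap-safe sequence-number comparisons. The standalone sketch below (using the same before()/after() idiom as the kernel, with made-up td_end/td_maxwin values) illustrates why a late retransmission of already-acked data falls under the lower bound sender->td_end - receiver->td_maxwin - 1 and is now answered with NFCT_TCP_IGNORE instead of being flagged INVALID.

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe TCP sequence comparisons, same idiom as the kernel's
 * before()/after() helpers.
 */
static int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)

int main(void)
{
        /* Made-up tracker state for one direction of a connection. */
        uint32_t sender_td_end = 1000000;       /* highest seq + len seen */
        uint32_t receiver_td_maxwin = 65535;    /* receiver's max window  */

        /* Lower edge of what conntrack still considers "in window". */
        uint32_t lower = sender_td_end - receiver_td_maxwin - 1;

        /* A late duplicate carrying data the peer has already acked. */
        uint32_t end_of_old_segment = sender_td_end - 200000;

        if (!after(end_of_old_segment, lower))
                printf("below lower bound: old data, IGNORE (was INVALID)\n");

        /* A segment that only slightly trails the current edge stays valid. */
        uint32_t end_of_recent_segment = sender_td_end - 1000;

        if (after(end_of_recent_segment, lower))
                printf("recent retransmission: still inside the window\n");

        return 0;
}
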
......@@ -443,9 +443,9 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
mutex_lock(&nf_log_mutex);
logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
if (!logger)
strlcpy(buf, "NONE", sizeof(buf));
strscpy(buf, "NONE", sizeof(buf));
else
strlcpy(buf, logger->name, sizeof(buf));
strscpy(buf, logger->name, sizeof(buf));
mutex_unlock(&nf_log_mutex);
r = proc_dostring(&tmp, write, buffer, lenp, ppos);
}
......
......@@ -44,19 +44,7 @@ static unsigned int help(struct sk_buff *skb,
exp->expectfn = nf_nat_follow_master;
/* Try to get same port: if not, try to change it. */
for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
int res;
exp->tuple.dst.u.tcp.port = htons(port);
res = nf_ct_expect_related(exp, 0);
if (res == 0)
break;
else if (res != -EBUSY) {
port = 0;
break;
}
}
port = nf_nat_exp_find_port(exp, ntohs(exp->saved_proto.tcp.port));
if (port == 0) {
nf_ct_helper_log(skb, exp->master, "all ports in use");
return NF_DROP;
......
......@@ -86,22 +86,9 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
* this one. */
exp->expectfn = nf_nat_follow_master;
/* Try to get same port: if not, try to change it. */
for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
int ret;
exp->tuple.dst.u.tcp.port = htons(port);
ret = nf_ct_expect_related(exp, 0);
if (ret == 0)
break;
else if (ret != -EBUSY) {
port = 0;
break;
}
}
port = nf_nat_exp_find_port(exp, ntohs(exp->saved_proto.tcp.port));
if (port == 0) {
nf_ct_helper_log(skb, ct, "all ports in use");
nf_ct_helper_log(skb, exp->master, "all ports in use");
return NF_DROP;
}
......
......@@ -198,3 +198,34 @@ void nf_nat_follow_master(struct nf_conn *ct,
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
}
EXPORT_SYMBOL(nf_nat_follow_master);
u16 nf_nat_exp_find_port(struct nf_conntrack_expect *exp, u16 port)
{
static const unsigned int max_attempts = 128;
int range, attempts_left;
u16 min = port;
range = USHRT_MAX - port;
attempts_left = range;
if (attempts_left > max_attempts)
attempts_left = max_attempts;
/* Try to get same port: if not, try to change it. */
for (;;) {
int res;
exp->tuple.dst.u.tcp.port = htons(port);
res = nf_ct_expect_related(exp, 0);
if (res == 0)
return port;
if (res != -EBUSY || (--attempts_left < 0))
break;
port = min + prandom_u32_max(range);
}
return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_exp_find_port);
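
nf_nat_exp_find_port() above keeps the old "try the requested port first" behaviour but replaces the per-helper linear walk with a bounded number of random probes (max_attempts = 128). The sketch below is a standalone toy model of that search strategy only; try_reserve() is a hypothetical stand-in for nf_ct_expect_related(), and none of the conntrack machinery is involved.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for nf_ct_expect_related(): pretend ports
 * below 40000 are already taken, everything else is free.
 */
static bool try_reserve(uint16_t port)
{
        return port >= 40000;
}

/* Toy model of the capped search: try the requested port first, then
 * a bounded number of random probes in [min, 65535], mirroring the
 * shape of nf_nat_exp_find_port() without the conntrack machinery.
 */
static uint16_t toy_find_port(uint16_t port)
{
        const unsigned int max_attempts = 128;
        unsigned int range = 65535 - port;
        unsigned int attempts_left = range < max_attempts ? range : max_attempts;
        uint16_t min = port;

        for (;;) {
                if (try_reserve(port))
                        return port;
                if (attempts_left-- == 0)
                        break;
                port = min + (uint16_t)(rand() % (range + 1));
        }
        return 0;       /* no free port found within the attempt budget */
}

int main(void)
{
        uint16_t p = toy_find_port(30000);

        if (p)
                printf("reserved port %u\n", p);
        else
                printf("gave up after the capped number of attempts\n");
        return 0;
}
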
......@@ -48,20 +48,8 @@ static unsigned int help(struct sk_buff *skb,
exp->dir = IP_CT_DIR_REPLY;
exp->expectfn = nf_nat_follow_master;
/* Try to get same port: if not, try to change it. */
for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
int ret;
exp->tuple.dst.u.tcp.port = htons(port);
ret = nf_ct_expect_related(exp, 0);
if (ret == 0)
break;
else if (ret != -EBUSY) {
port = 0;
break;
}
}
port = nf_nat_exp_find_port(exp,
ntohs(exp->saved_proto.tcp.port));
if (port == 0) {
nf_ct_helper_log(skb, ct, "all ports in use");
return NF_DROP;
......
......@@ -410,19 +410,7 @@ static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
exp->dir = !dir;
exp->expectfn = nf_nat_sip_expected;
for (; port != 0; port++) {
int ret;
exp->tuple.dst.u.udp.port = htons(port);
ret = nf_ct_expect_related(exp, NF_CT_EXP_F_SKIP_MASTER);
if (ret == 0)
break;
else if (ret != -EBUSY) {
port = 0;
break;
}
}
port = nf_nat_exp_find_port(exp, port);
if (port == 0) {
nf_ct_helper_log(skb, ct, "all ports in use for SIP");
return NF_DROP;
......
......@@ -742,7 +742,7 @@ __printf(2, 3) int nft_request_module(struct net *net, const char *fmt,
return -ENOMEM;
req->done = false;
strlcpy(req->module, module_name, MODULE_NAME_LEN);
strscpy(req->module, module_name, MODULE_NAME_LEN);
list_add_tail(&req->list, &nft_net->module_list);
return -EAGAIN;
......
......@@ -51,7 +51,7 @@ static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
snprintf(os_match, NFT_OSF_MAXGENRELEN, "%s:%s",
data.genre, data.version);
else
strlcpy(os_match, data.genre, NFT_OSF_MAXGENRELEN);
strscpy(os_match, data.genre, NFT_OSF_MAXGENRELEN);
strncpy((char *)dest, os_match, NFT_OSF_MAXGENRELEN);
}
......
......@@ -766,7 +766,7 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
msize += off;
m->u.user.match_size = msize;
strlcpy(name, match->name, sizeof(name));
strscpy(name, match->name, sizeof(name));
module_put(match->me);
strncpy(m->u.user.name, name, sizeof(m->u.user.name));
......@@ -1146,7 +1146,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
tsize += off;
t->u.user.target_size = tsize;
strlcpy(name, target->name, sizeof(name));
strscpy(name, target->name, sizeof(name));
module_put(target->me);
strncpy(t->u.user.name, name, sizeof(t->u.user.name));
......@@ -1827,7 +1827,7 @@ int xt_proto_init(struct net *net, u_int8_t af)
root_uid = make_kuid(net->user_ns, 0);
root_gid = make_kgid(net->user_ns, 0);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TABLES, sizeof(buf));
proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
sizeof(struct seq_net_private),
......@@ -1837,7 +1837,7 @@ int xt_proto_init(struct net *net, u_int8_t af)
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(proc, root_uid, root_gid);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_MATCHES, sizeof(buf));
proc = proc_create_seq_private(buf, 0440, net->proc_net,
&xt_match_seq_ops, sizeof(struct nf_mttg_trav),
......@@ -1847,7 +1847,7 @@ int xt_proto_init(struct net *net, u_int8_t af)
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(proc, root_uid, root_gid);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TARGETS, sizeof(buf));
proc = proc_create_seq_private(buf, 0440, net->proc_net,
&xt_target_seq_ops, sizeof(struct nf_mttg_trav),
......@@ -1862,12 +1862,12 @@ int xt_proto_init(struct net *net, u_int8_t af)
#ifdef CONFIG_PROC_FS
out_remove_matches:
strlcpy(buf, xt_prefix[af], sizeof(buf));
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_MATCHES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
out_remove_tables:
strlcpy(buf, xt_prefix[af], sizeof(buf));
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TABLES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
out:
......@@ -1881,15 +1881,15 @@ void xt_proto_fini(struct net *net, u_int8_t af)
#ifdef CONFIG_PROC_FS
char buf[XT_FUNCTION_MAXNAMELEN];
strlcpy(buf, xt_prefix[af], sizeof(buf));
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TABLES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TARGETS, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strscpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_MATCHES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
#endif /*CONFIG_PROC_FS*/
......
......@@ -144,7 +144,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
goto err1;
gnet_stats_basic_sync_init(&est->bstats);
strlcpy(est->name, info->name, sizeof(est->name));
strscpy(est->name, info->name, sizeof(est->name));
spin_lock_init(&est->lock);
est->refcnt = 1;
est->params.interval = info->interval;
......