Commit c57107c7 authored by David S. Miller

Merge branch 'nlattr_align'

Nicolas Dichtel says:

====================
libnl: enhance API to ease 64bit alignment for attribute

Here is a proposal to add more helpers to libnetlink to manage 64-bit
alignment issues.
Note that this series was only tested on x86, by tweaking
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS and adding some traces.

The first patch adds helpers for 64-bit alignment and the other patches
use them.

We could also add helpers for nla_put_u64() and its variants if needed.

v1 -> v2:
 - remove patch #1
 - split patch #2 (now #1 and #2)
 - add nla_need_padding_for_64bit()
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 732912d7 3d6b66c1
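
For context, here is a minimal usage sketch (not part of the series): a fill routine that emits IFLA_STATS64 with the new nla_put_64bit() helper, using IFLA_PAD as the padding attribute type. The function name and the surrounding message setup are illustrative only.

/* Sketch only: emit a 64-bit aligned IFLA_STATS64 attribute.  On
 * architectures without efficient unaligned access, nla_put_64bit()
 * first inserts a zero-length IFLA_PAD attribute so that nla_data()
 * of IFLA_STATS64 lands on an 8-byte boundary; elsewhere it behaves
 * exactly like nla_put().
 */
static int put_stats64_example(struct sk_buff *skb,
			       const struct rtnl_link_stats64 *stats)
{
	if (nla_put_64bit(skb, IFLA_STATS64, sizeof(*stats), stats, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}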
@@ -244,13 +244,21 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count);
int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
int nla_strcmp(const struct nlattr *nla, const char *str);
struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
				   int attrlen, int padattr);
void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype,
				 int attrlen, int padattr);
void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
	       const void *data);
void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
		     const void *data, int padattr);
void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
		  const void *data, int padattr);
int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
int nla_append(struct sk_buff *skb, int attrlen, const void *data);
@@ -1230,6 +1238,27 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
return nla_validate(nla_data(start), nla_len(start), maxtype, policy);
}
/**
* nla_need_padding_for_64bit - test 64-bit alignment of the next attribute
* @skb: socket buffer the message is stored in
*
* Return true if padding is needed to align the next attribute (nla_data()) to
* a 64-bit aligned area.
*/
static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* The nlattr header is 4 bytes in size, that's why we test
	 * if the skb->data _is_ aligned. A NOP attribute, plus
	 * nlattr header for next attribute, will make nla_data()
	 * 8-byte aligned.
	 */
	if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
		return true;
#endif
	return false;
}
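
As a rough stand-alone illustration of the check above (assumptions: the netlink tail is always kept 4-byte aligned and the attribute header is 4 bytes; the example function is hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* "tail" stands in for skb_tail_pointer(skb).  Example: tail = 16 ->
 * header at 16..19, payload at 20 (only 4-byte aligned); inserting a
 * 4-byte zero-length pad attribute first moves the real header to
 * 20..23 and the payload to 24 (8-byte aligned).
 */
static bool example_need_padding_for_64bit(uintptr_t tail)
{
	/* With a 4-byte aligned tail, the only two cases are
	 * tail % 8 == 0 (padding needed) and tail % 8 == 4 (payload
	 * already lands on an 8-byte boundary).
	 */
	return tail % 8 == 0;
}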
/**
* nla_align_64bit - 64-bit align the nla_data() of next attribute
* @skb: socket buffer the message is stored in
@@ -1244,16 +1273,10 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
*/
static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* The nlattr header is 4 bytes in size, that's why we test
* if the skb->data _is_ aligned. This NOP attribute, plus
* nlattr header for next attribute, will make nla_data()
* 8-byte aligned.
*/
if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8) &&
if (nla_need_padding_for_64bit(skb) &&
!nla_reserve(skb, padattr, 0))
return -EMSGSIZE;
#endif
return 0;
}
@@ -317,6 +317,7 @@ enum rtattr_type_t {
RTA_ENCAP_TYPE,
RTA_ENCAP,
RTA_EXPIRES,
RTA_PAD,
__RTA_MAX
};
@@ -354,6 +354,29 @@ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
}
EXPORT_SYMBOL(__nla_reserve);
/**
 * __nla_reserve_64bit - reserve room for attribute on the skb and align it
 * @skb: socket buffer to reserve room on
 * @attrtype: attribute type
 * @attrlen: length of attribute payload
 * @padattr: attribute type for the zero-length padding attribute
 *
 * Adds a netlink attribute header to a socket buffer and reserves
 * room for the payload but does not copy it. It also ensures that the
 * attribute payload will be 64-bit aligned.
 *
 * The caller is responsible for ensuring that the skb provides enough
 * tailroom for the attribute header and payload.
 */
struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
				   int attrlen, int padattr)
{
	if (nla_need_padding_for_64bit(skb))
		nla_align_64bit(skb, padattr);

	return __nla_reserve(skb, attrtype, attrlen);
}
EXPORT_SYMBOL(__nla_reserve_64bit);
/**
* __nla_reserve_nohdr - reserve room for attribute without header
* @skb: socket buffer to reserve room on
@@ -396,6 +419,35 @@ struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
}
EXPORT_SYMBOL(nla_reserve);
/**
 * nla_reserve_64bit - reserve room for attribute on the skb and align it
 * @skb: socket buffer to reserve room on
 * @attrtype: attribute type
 * @attrlen: length of attribute payload
 * @padattr: attribute type for the zero-length padding attribute
 *
 * Adds a netlink attribute header to a socket buffer and reserves
 * room for the payload but does not copy it. It also ensures that the
 * attribute payload will be 64-bit aligned.
 *
 * Returns NULL if the tailroom of the skb is insufficient to store
 * the attribute header and payload.
 */
struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen,
				 int padattr)
{
	size_t len;

	if (nla_need_padding_for_64bit(skb))
		len = nla_total_size_64bit(attrlen);
	else
		len = nla_total_size(attrlen);
	if (unlikely(skb_tailroom(skb) < len))
		return NULL;

	return __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
}
EXPORT_SYMBOL(nla_reserve_64bit);
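
The intended calling pattern looks roughly like the sketch below (illustrative only; it mirrors the rtnl_fill_stats() conversion further down, where dev_get_stats() fills the reserved, now 64-bit aligned, payload in place):

/* Sketch only: reserve an aligned IFLA_STATS64 attribute, then fill
 * its payload directly; nla_data(attr) is 64-bit aligned here.
 */
static int reserve_stats64_example(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	dev_get_stats(dev, nla_data(attr));
	return 0;
}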
/**
* nla_reserve_nohdr - reserve room for attribute without header
* @skb: socket buffer to reserve room on
@@ -435,6 +487,26 @@ void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
}
EXPORT_SYMBOL(__nla_put);
/**
 * __nla_put_64bit - Add a netlink attribute to a socket buffer and align it
 * @skb: socket buffer to add attribute to
 * @attrtype: attribute type
 * @attrlen: length of attribute payload
 * @data: head of attribute payload
 * @padattr: attribute type for the zero-length padding attribute
 *
 * The caller is responsible for ensuring that the skb provides enough
 * tailroom for the attribute header and payload.
 */
void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
		     const void *data, int padattr)
{
	struct nlattr *nla;

	nla = __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
	memcpy(nla_data(nla), data, attrlen);
}
EXPORT_SYMBOL(__nla_put_64bit);
/**
* __nla_put_nohdr - Add a netlink attribute without header
* @skb: socket buffer to add attribute to
@@ -473,6 +545,33 @@ int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
}
EXPORT_SYMBOL(nla_put);
/**
 * nla_put_64bit - Add a netlink attribute to a socket buffer and align it
 * @skb: socket buffer to add attribute to
 * @attrtype: attribute type
 * @attrlen: length of attribute payload
 * @data: head of attribute payload
 * @padattr: attribute type for the zero-length padding attribute
 *
 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
 * the attribute header and payload.
 */
int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
		  const void *data, int padattr)
{
	size_t len;

	if (nla_need_padding_for_64bit(skb))
		len = nla_total_size_64bit(attrlen);
	else
		len = nla_total_size(attrlen);
	if (unlikely(skb_tailroom(skb) < len))
		return -EMSGSIZE;

	__nla_put_64bit(skb, attrtype, attrlen, data, padattr);
	return 0;
}
EXPORT_SYMBOL(nla_put_64bit);
/**
* nla_put_nohdr - Add a netlink attribute without header
* @skb: socket buffer to add attribute to
@@ -1051,14 +1051,9 @@ static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
{
struct rtnl_link_stats64 *sp;
struct nlattr *attr;
int err;
err = nla_align_64bit(skb, IFLA_PAD);
if (err)
return err;
attr = nla_reserve(skb, IFLA_STATS64,
sizeof(struct rtnl_link_stats64));
attr = nla_reserve_64bit(skb, IFLA_STATS64,
sizeof(struct rtnl_link_stats64), IFLA_PAD);
if (!attr)
return -EMSGSIZE;
@@ -3469,17 +3464,10 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
if (filter_mask & IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64)) {
struct rtnl_link_stats64 *sp;
int err;
/* if necessary, add a zero length NOP attribute so that
* IFLA_STATS_LINK_64 will be 64-bit aligned
*/
err = nla_align_64bit(skb, IFLA_STATS_UNSPEC);
if (err)
goto nla_put_failure;
attr = nla_reserve(skb, IFLA_STATS_LINK_64,
sizeof(struct rtnl_link_stats64));
attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
sizeof(struct rtnl_link_stats64),
IFLA_STATS_UNSPEC);
if (!attr)
goto nla_put_failure;
@@ -2104,7 +2104,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
mfcs.mfcs_packets = c->mfc_un.res.pkt;
mfcs.mfcs_bytes = c->mfc_un.res.bytes;
mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) < 0)
return -EMSGSIZE;
rtm->rtm_type = RTN_MULTICAST;
@@ -2237,7 +2237,7 @@ static size_t mroute_msgsize(bool unresolved, int maxvif)
+ nla_total_size(0) /* RTA_MULTIPATH */
+ maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
/* RTA_MFC_STATS */
+ nla_total_size(sizeof(struct rta_mfc_stats))
+ nla_total_size_64bit(sizeof(struct rta_mfc_stats))
;
return len;
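
The size estimate switches to nla_total_size_64bit() because, on architectures without efficient unaligned access, room must also be reserved for the zero-length pad attribute. A sketch of the shape that helper is assumed to have (its definition is not part of this hunk):

/* Assumed shape, shown for illustration: the aligned attribute size
 * plus, when unaligned access is not efficient, one extra zero-length
 * attribute header for the padding.
 */
static inline int nla_total_size_64bit(int payload)
{
	return NLA_ALIGN(nla_attr_size(payload))
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		+ NLA_ALIGN(nla_attr_size(0))
#endif
		;
}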
@@ -2268,7 +2268,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
mfcs.mfcs_packets = c->mfc_un.res.pkt;
mfcs.mfcs_bytes = c->mfc_un.res.bytes;
mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) < 0)
return -EMSGSIZE;
rtm->rtm_type = RTN_MULTICAST;
@@ -2411,7 +2411,7 @@ static int mr6_msgsize(bool unresolved, int maxvif)
+ nla_total_size(0) /* RTA_MULTIPATH */
+ maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
/* RTA_MFC_STATS */
+ nla_total_size(sizeof(struct rta_mfc_stats))
+ nla_total_size_64bit(sizeof(struct rta_mfc_stats))
;
return len;