Commit f40554f6 authored by Stephen Hemminger

Update kernel headers to 2.6.31

Final 2.6.31 released, so update sanitized headers.
parent f0309aa4
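The sanitized headers are the kernel's exported headers with the kernel-only (#ifdef __KERNEL__) sections stripped, so iproute2 can compile against them directly instead of whatever headers the distribution's libc happens to ship. As a rough sketch of the kind of userspace code these headers support (only IFA_RTA(), IFA_PAYLOAD() and the standard RTA_* accessors from the headers below are assumed; the helper itself is hypothetical), walking the attributes of an RTM_NEWADDR message looks like this:

/* Hypothetical sketch, not part of the commit: iterate the rtattrs of an
 * RTM_NEWADDR message using only the userspace-visible macros from the
 * sanitized headers. */
#include <stdio.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_addr.h>

static void dump_addr_attrs(struct nlmsghdr *nlh)
{
	struct ifaddrmsg *ifa = NLMSG_DATA(nlh);   /* fixed header after nlmsghdr */
	struct rtattr *rta = IFA_RTA(ifa);         /* first attribute */
	int len = IFA_PAYLOAD(nlh);                /* bytes of attribute data */

	for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len))
		printf("ifindex %u: attribute type %d, %d payload bytes\n",
		       ifa->ifa_index, rta->rta_type, (int)RTA_PAYLOAD(rta));
}

iproute2's own code goes through its libnetlink helpers rather than open-coding this; the sketch only illustrates why the userspace-compatibility macros are kept while the kernel-only blocks in the hunks below are dropped.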
@@ -19,6 +19,11 @@ enum {
* @packets: number of seen packets
*/
struct gnet_stats_basic
{
__u64 bytes;
__u32 packets;
};
struct gnet_stats_basic_packed
{
__u64 bytes;
__u32 packets;
......
@@ -55,9 +55,7 @@ struct ifa_cacheinfo
};
/* backwards compatibility for userspace */
#ifndef __KERNEL__
#define IFA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifaddrmsg))))
#define IFA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifaddrmsg))
#endif
#endif
@@ -120,30 +120,5 @@ struct ethhdr {
__be16 h_proto; /* packet type ID field */
} __attribute__((packed));
#ifdef __KERNEL__
#include <linux/skbuff.h>
static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_mac_header(skb);
}
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
#ifdef CONFIG_SYSCTL
extern struct ctl_table ether_table[];
#endif
extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
/*
* Display a 6 byte device address (MAC) in a readable format.
*/
extern char *print_mac(char *buf, const unsigned char *addr);
#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_BUF_SIZE 18
#define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE] __maybe_unused
#endif
#endif /* _LINUX_IF_ETHER_H */
@@ -88,10 +88,8 @@ enum
#define IFLA_MAX (__IFLA_MAX - 1)
/* backwards compatibility for userspace */
#ifndef __KERNEL__
#define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
#define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg))
#endif
/* ifi_flags.
......
@@ -3,9 +3,6 @@
#include <linux/types.h>
#ifdef __KERNEL__
#include <linux/ip.h>
#endif
#define SIOCGETTUNNEL (SIOCDEVPRIVATE + 0)
#define SIOCADDTUNNEL (SIOCDEVPRIVATE + 1)
......
@@ -13,304 +13,6 @@
#ifndef _LINUX_IF_VLAN_H_
#define _LINUX_IF_VLAN_H_
#ifdef __KERNEL__
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header)
* that VLAN requires.
*/
#define VLAN_ETH_ALEN 6 /* Octets in one ethernet addr */
#define VLAN_ETH_HLEN 18 /* Total octets in header. */
#define VLAN_ETH_ZLEN 64 /* Min. octets in frame sans FCS */
/*
* According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
*/
#define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */
#define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */
/*
* struct vlan_hdr - vlan header
* @h_vlan_TCI: priority and VLAN ID
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_hdr {
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
/**
* struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
* @h_dest: destination ethernet address
* @h_source: source ethernet address
* @h_vlan_proto: ethernet protocol (always 0x8100)
* @h_vlan_TCI: priority and VLAN ID
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_ethhdr {
unsigned char h_dest[ETH_ALEN];
unsigned char h_source[ETH_ALEN];
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
#include <linux/skbuff.h>
static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
return (struct vlan_ethhdr *)skb_mac_header(skb);
}
#define VLAN_VID_MASK 0xfff
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
/* if this changes, algorithm will have to be reworked because this
* depends on completely exhausting the VLAN identifier space. Thus
* it gives constant time look-up, but in many cases it wastes memory.
*/
#define VLAN_GROUP_ARRAY_LEN 4096
#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS)
struct vlan_group {
struct net_device *real_dev; /* The ethernet(like) device
* the vlan is attached to.
*/
unsigned int nr_vlans;
struct hlist_node hlist; /* linked list */
struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
struct rcu_head rcu;
};
static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
u16 vlan_id)
{
struct net_device **array;
array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
}
static inline void vlan_group_set_device(struct vlan_group *vg,
u16 vlan_id,
struct net_device *dev)
{
struct net_device **array;
if (!vg)
return;
array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
}
#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci)
#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci)
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
u16 vlan_tci, int polling);
extern int vlan_hwaccel_do_receive(struct sk_buff *skb);
extern int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci, struct sk_buff *skb);
extern int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci);
#else
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
BUG();
return NULL;
}
static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
BUG();
return 0;
}
static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
u16 vlan_tci, int polling)
{
BUG();
return NET_XMIT_SUCCESS;
}
static inline int vlan_hwaccel_do_receive(struct sk_buff *skb)
{
return 0;
}
static inline int vlan_gro_receive(struct napi_struct *napi,
struct vlan_group *grp,
unsigned int vlan_tci, struct sk_buff *skb)
{
return NET_RX_DROP;
}
static inline int vlan_gro_frags(struct napi_struct *napi,
struct vlan_group *grp, unsigned int vlan_tci)
{
return NET_RX_DROP;
}
#endif
/**
* vlan_hwaccel_rx - netif_rx wrapper for VLAN RX acceleration
* @skb: buffer
* @grp: vlan group
* @vlan_tci: VLAN TCI as received from the card
*/
static inline int vlan_hwaccel_rx(struct sk_buff *skb,
struct vlan_group *grp,
u16 vlan_tci)
{
return __vlan_hwaccel_rx(skb, grp, vlan_tci, 0);
}
/**
* vlan_hwaccel_receive_skb - netif_receive_skb wrapper for VLAN RX acceleration
* @skb: buffer
* @grp: vlan group
* @vlan_tci: VLAN TCI as received from the card
*/
static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
struct vlan_group *grp,
u16 vlan_tci)
{
return __vlan_hwaccel_rx(skb, grp, vlan_tci, 1);
}
/**
* __vlan_put_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
* Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*/
static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
struct vlan_ethhdr *veth;
if (skb_cow_head(skb, VLAN_HLEN) < 0) {
kfree_skb(skb);
return NULL;
}
veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
/* Move the mac addresses to the beginning of the new header. */
memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN);
skb->mac_header -= VLAN_HLEN;
/* first, the ethernet type */
veth->h_vlan_proto = htons(ETH_P_8021Q);
/* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci);
skb->protocol = htons(ETH_P_8021Q);
return skb;
}
/**
* __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
* @skb: skbuff to tag
* @vlan_tci: VLAN TCI to insert
*
* Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
*/
static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
u16 vlan_tci)
{
skb->vlan_tci = vlan_tci;
return skb;
}
#define HAVE_VLAN_PUT_TAG
/**
* vlan_put_tag - inserts VLAN tag according to device features
* @skb: skbuff to tag
* @vlan_tci: VLAN TCI to insert
*
* Assumes skb->dev is the target that will xmit this frame.
* Returns a VLAN tagged skb.
*/
static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
return __vlan_hwaccel_put_tag(skb, vlan_tci);
} else {
return __vlan_put_tag(skb, vlan_tci);
}
}
/**
* __vlan_get_tag - get the VLAN ID that is part of the payload
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
* Returns error if the skb is not of VLAN type
*/
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
if (veth->h_vlan_proto != htons(ETH_P_8021Q)) {
return -EINVAL;
}
*vlan_tci = ntohs(veth->h_vlan_TCI);
return 0;
}
/**
* __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
* Returns error if @skb->vlan_tci is not set correctly
*/
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
u16 *vlan_tci)
{
if (vlan_tx_tag_present(skb)) {
*vlan_tci = skb->vlan_tci;
return 0;
} else {
*vlan_tci = 0;
return -EINVAL;
}
}
#define HAVE_VLAN_GET_TAG
/**
* vlan_get_tag - get the VLAN ID from the skb
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
* Returns error if the skb is not VLAN tagged
*/
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
return __vlan_hwaccel_get_tag(skb, vlan_tci);
} else {
return __vlan_get_tag(skb, vlan_tci);
}
}
#endif /* __KERNEL__ */
/* VLAN IOCTLs are found in sockios.h */
......
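The block removed above is the kernel-internal half of if_vlan.h (the vlan_group tables, __vlan_put_tag() and the hwaccel wrappers); after sanitization, what userspace keeps from this header is essentially the ioctl interface that the remaining "VLAN IOCTLs are found in sockios.h" comment points at. A rough sketch of using it follows (struct vlan_ioctl_args, GET_VLAN_VID_CMD and SIOCGIFVLAN come from the installed headers and are assumptions here, since they do not appear in the hunk above):

/* Hypothetical sketch: read the VLAN ID of a VLAN interface through the
 * userspace ioctl interface that the sanitized if_vlan.h retains. */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if_vlan.h>
#include <linux/sockios.h>

static int get_vlan_id(const char *ifname)
{
	struct vlan_ioctl_args args;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&args, 0, sizeof(args));
	args.cmd = GET_VLAN_VID_CMD;
	strncpy(args.device1, ifname, sizeof(args.device1) - 1);
	if (ioctl(fd, SIOCGIFVLAN, &args) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return args.u.VID;   /* VLAN ID of ifname */
}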
@@ -120,21 +120,5 @@ struct tcpvegas_info {
__u32 tcpv_minrtt;
};
#ifdef __KERNEL__
struct sock;
struct inet_hashinfo;
struct inet_diag_handler {
struct inet_hashinfo *idiag_hashinfo;
void (*idiag_get_info)(struct sock *sk,
struct inet_diag_msg *r,
void *info);
__u16 idiag_info_size;
__u16 idiag_type;
};
extern int inet_diag_register(const struct inet_diag_handler *handler);
extern void inet_diag_unregister(const struct inet_diag_handler *handler);
#endif /* __KERNEL__ */
#endif /* _INET_DIAG_H_ */
@@ -8,7 +8,6 @@
#include <linux/netfilter.h>
/* only for userspace compatibility */
#ifndef __KERNEL__
/* IP Cache bits. */
/* Src IP address. */
#define NFC_IP_SRC 0x0001
@@ -49,7 +48,6 @@
/* Packets about to hit the wire. */
#define NF_IP_POST_ROUTING 4
#define NF_IP_NUMHOOKS 5
#endif /* ! __KERNEL__ */
enum nf_ip_hook_priorities {
NF_IP_PRI_FIRST = INT_MIN,
@@ -73,11 +71,5 @@ enum nf_ip_hook_priorities {
/* 2.4 firewalling went 64 through 67. */
#define SO_ORIGINAL_DST 80
#ifdef __KERNEL__
extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
extern int ip_xfrm_me_harder(struct sk_buff *skb);
extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
#endif /*__KERNEL__*/
#endif /*__LINUX_IP_NETFILTER_H*/
@@ -151,122 +151,5 @@ struct nlattr
#define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr)))
#ifdef __KERNEL__
#include <linux/capability.h>
#include <linux/skbuff.h>
static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
return (struct nlmsghdr *)skb->data;
}
struct netlink_skb_parms
{
struct ucred creds; /* Skb credentials */
__u32 pid;
__u32 dst_group;
kernel_cap_t eff_cap;
__u32 loginuid; /* Login (audit) uid */
__u32 sessionid; /* Session id (audit) */
__u32 sid; /* SELinux security id */
};
#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
extern struct sock *netlink_kernel_create(struct net *net,
int unit,unsigned int groups,
void (*input)(struct sk_buff *skb),
struct mutex *cb_mutex,
struct module *module);
extern void netlink_kernel_release(struct sock *sk);
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
__u32 group, gfp_t allocation);
extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
extern int netlink_register_notifier(struct notifier_block *nb);
extern int netlink_unregister_notifier(struct notifier_block *nb);
/* finegrained unicast helpers: */
struct sock *netlink_getsockbyfilp(struct file *filp);
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
long *timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
int netlink_sendskb(struct sock *sk, struct sk_buff *skb);
/*
* skb should fit one page. This choice is good for headerless malloc.
* But we should limit to 8K so that userspace does not have to
* use enormous buffer sizes on recvmsg() calls just to avoid
* MSG_TRUNC when PAGE_SIZE is very large.
*/
#if PAGE_SIZE < 8192UL
#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(PAGE_SIZE)
#else
#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(8192UL)
#endif
#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)
struct netlink_callback
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
int (*dump)(struct sk_buff * skb, struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
int family;
long args[6];
};
struct netlink_notify
{
struct net *net;
int pid;
int protocol;
};
static __inline__ struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
{
struct nlmsghdr *nlh;
int size = NLMSG_LENGTH(len);
nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
nlh->nlmsg_type = type;
nlh->nlmsg_len = size;
nlh->nlmsg_flags = flags;
nlh->nlmsg_pid = pid;
nlh->nlmsg_seq = seq;
if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
return nlh;
}
#define NLMSG_NEW(skb, pid, seq, type, len, flags) \
({ if (unlikely(skb_tailroom(skb) < (int)NLMSG_SPACE(len))) \
goto nlmsg_failure; \
__nlmsg_put(skb, pid, seq, type, len, flags); })
#define NLMSG_PUT(skb, pid, seq, type, len) \
NLMSG_NEW(skb, pid, seq, type, len, 0)
extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
struct nlmsghdr *nlh,
int (*dump)(struct sk_buff *skb, struct netlink_callback*),
int (*done)(struct netlink_callback*));
#define NL_NONROOT_RECV 0x1
#define NL_NONROOT_SEND 0x2
extern void netlink_set_nonroot(int protocol, unsigned flag);
#endif /* __KERNEL__ */
#endif /* __LINUX_NETLINK_H */
@@ -524,7 +524,6 @@ enum
#define NDUSEROPT_MAX (__NDUSEROPT_MAX - 1)
#ifndef __KERNEL__
/* RTnetlink multicast groups - backwards compatibility for userspace */
#define RTMGRP_LINK 1
#define RTMGRP_NOTIFY 2
@@ -545,7 +544,6 @@ enum
#define RTMGRP_DECnet_ROUTE 0x4000
#define RTMGRP_IPV6_PREFIX 0x20000
#endif
/* RTnetlink multicast groups */
enum rtnetlink_groups {
@@ -611,169 +609,6 @@ struct tcamsg
/* End of information exported to user level */
#ifdef __KERNEL__
#include <linux/mutex.h>
static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str)
{
int len = strlen(str) + 1;
return len > rta->rta_len || memcmp(RTA_DATA(rta), str, len);
}
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
u32 group, struct nlmsghdr *nlh, gfp_t flags);
extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
u32 id, u32 ts, u32 tsage, long expires,
u32 error);
extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
#define RTA_PUT(skb, attrtype, attrlen, data) \
({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \
goto rtattr_failure; \
__rta_fill(skb, attrtype, attrlen, data); })
#define RTA_APPEND(skb, attrlen, data) \
({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \
goto rtattr_failure; \
memcpy(skb_put(skb, attrlen), data, attrlen); })
#define RTA_PUT_NOHDR(skb, attrlen, data) \
({ RTA_APPEND(skb, RTA_ALIGN(attrlen), data); \
memset(skb_tail_pointer(skb) - (RTA_ALIGN(attrlen) - attrlen), 0, \
RTA_ALIGN(attrlen) - attrlen); })
#define RTA_PUT_U8(skb, attrtype, value) \
({ u8 _tmp = (value); \
RTA_PUT(skb, attrtype, sizeof(u8), &_tmp); })
#define RTA_PUT_U16(skb, attrtype, value) \
({ u16 _tmp = (value); \
RTA_PUT(skb, attrtype, sizeof(u16), &_tmp); })
#define RTA_PUT_U32(skb, attrtype, value) \
({ u32 _tmp = (value); \
RTA_PUT(skb, attrtype, sizeof(u32), &_tmp); })
#define RTA_PUT_U64(skb, attrtype, value) \
({ u64 _tmp = (value); \
RTA_PUT(skb, attrtype, sizeof(u64), &_tmp); })
#define RTA_PUT_SECS(skb, attrtype, value) \
RTA_PUT_U64(skb, attrtype, (value) / HZ)
#define RTA_PUT_MSECS(skb, attrtype, value) \
RTA_PUT_U64(skb, attrtype, jiffies_to_msecs(value))
#define RTA_PUT_STRING(skb, attrtype, value) \
RTA_PUT(skb, attrtype, strlen(value) + 1, value)
#define RTA_PUT_FLAG(skb, attrtype) \
RTA_PUT(skb, attrtype, 0, NULL);
#define RTA_NEST(skb, type) \
({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \
RTA_PUT(skb, type, 0, NULL); \
__start; })
#define RTA_NEST_END(skb, start) \
({ (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
(skb)->len; })
#define RTA_NEST_COMPAT(skb, type, attrlen, data) \
({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \
RTA_PUT(skb, type, attrlen, data); \
RTA_NEST(skb, type); \
__start; })
#define RTA_NEST_COMPAT_END(skb, start) \
({ struct rtattr *__nest = (void *)(start) + NLMSG_ALIGN((start)->rta_len); \
(start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
RTA_NEST_END(skb, __nest); \
(skb)->len; })
#define RTA_NEST_CANCEL(skb, start) \
({ if (start) \
skb_trim(skb, (unsigned char *) (start) - (skb)->data); \
-1; })
#define RTA_GET_U8(rta) \
({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u8)) \
goto rtattr_failure; \
*(u8 *) RTA_DATA(rta); })
#define RTA_GET_U16(rta) \
({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u16)) \
goto rtattr_failure; \
*(u16 *) RTA_DATA(rta); })
#define RTA_GET_U32(rta) \
({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u32)) \
goto rtattr_failure; \
*(u32 *) RTA_DATA(rta); })
#define RTA_GET_U64(rta) \
({ u64 _tmp; \
if (!rta || RTA_PAYLOAD(rta) < sizeof(u64)) \
goto rtattr_failure; \
memcpy(&_tmp, RTA_DATA(rta), sizeof(_tmp)); \
_tmp; })
#define RTA_GET_FLAG(rta) (!!(rta))
#define RTA_GET_SECS(rta) ((unsigned long) RTA_GET_U64(rta) * HZ)
#define RTA_GET_MSECS(rta) (msecs_to_jiffies((unsigned long) RTA_GET_U64(rta)))
static inline struct rtattr *
__rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
struct rtattr *rta;
int size = RTA_LENGTH(attrlen);
rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size));
rta->rta_type = attrtype;
rta->rta_len = size;
memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
return rta;
}
#define __RTA_PUT(skb, attrtype, attrlen) \
({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \
goto rtattr_failure; \
__rta_reserve(skb, attrtype, attrlen); })
extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change);
/* RTNL is used as a global lock for all changes to network configuration */
extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
extern void rtnetlink_init(void);
extern void __rtnl_unlock(void);
#define ASSERT_RTNL() do { \
if (unlikely(!rtnl_is_locked())) { \
printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
__FILE__, __LINE__); \
dump_stack(); \
} \
} while(0)
static inline u32 rtm_get_table(struct rtattr **rta, u8 table)
{
return RTA_GET_U32(rta[RTA_TABLE-1]);
rtattr_failure:
return table;
}
#endif /* __KERNEL__ */
#endif /* __LINUX_RTNETLINK_H */
@@ -22,14 +22,8 @@ struct __kernel_sockaddr_storage {
#include <linux/sockios.h> /* the SIOCxxx I/O controls */
#include <linux/uio.h> /* iovec support */
#include <linux/types.h> /* pid_t */
#include <linux/compiler.h> /* __user */
/* */
#ifdef __KERNEL__
# ifdef CONFIG_PROC_FS
struct seq_file;
extern void socket_seq_show(struct seq_file *seq);
# endif
#endif /* __KERNEL__ */
typedef unsigned short sa_family_t;
@@ -104,12 +98,10 @@ struct cmsghdr {
* This mess will go away with glibc
*/
#ifdef __KERNEL__
#define __KINLINE static inline
#elif defined(__GNUC__)
#if defined(__GNUC__)
#define __KINLINE static __inline__
#elif defined(__cplusplus)
#define __KINLINE static inline
#define __KINLINE static __inline__
#else
#define __KINLINE static
#endif
@@ -310,23 +302,5 @@ struct ucred {
/* IPX options */
#define IPX_TYPE 1
#ifdef __KERNEL__
extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len);
extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
struct iovec *iov,
int offset,
unsigned int len, __wsum *csump);
extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
int offset, int len);
extern int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr, int __user *ulen);
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
#endif
#endif /* not kernel and not glibc */
#endif /* _LINUX_SOCKET_H */
@@ -4,150 +4,9 @@
#include <asm/types.h>
#ifndef __ASSEMBLY__
#ifdef __KERNEL__
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
#endif
#include <linux/posix_types.h>
#ifdef __KERNEL__
typedef __u32 __kernel_dev_t;
typedef __kernel_fd_set fd_set;
typedef __kernel_dev_t dev_t;
typedef __kernel_ino_t ino_t;
typedef __kernel_mode_t mode_t;
typedef __kernel_nlink_t nlink_t;
typedef __kernel_off_t off_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_daddr_t daddr_t;
typedef __kernel_key_t key_t;
typedef __kernel_suseconds_t suseconds_t;
typedef __kernel_timer_t timer_t;
typedef __kernel_clockid_t clockid_t;
typedef __kernel_mqd_t mqd_t;
typedef _Bool bool;
typedef __kernel_uid32_t uid_t;
typedef __kernel_gid32_t gid_t;
typedef __kernel_uid16_t uid16_t;
typedef __kernel_gid16_t gid16_t;
typedef unsigned long uintptr_t;
#ifdef CONFIG_UID16
/* This is defined by include/asm-{arch}/posix_types.h */
typedef __kernel_old_uid_t old_uid_t;
typedef __kernel_old_gid_t old_gid_t;
#endif /* CONFIG_UID16 */
#if defined(__GNUC__)
typedef __kernel_loff_t loff_t;
#endif
/*
* The following typedefs are also protected by individual ifdefs for
* historical reasons:
*/
#ifndef _SIZE_T
#define _SIZE_T
typedef __kernel_size_t size_t;
#endif
#ifndef _SSIZE_T
#define _SSIZE_T
typedef __kernel_ssize_t ssize_t;
#endif
#ifndef _PTRDIFF_T
#define _PTRDIFF_T
typedef __kernel_ptrdiff_t ptrdiff_t;
#endif
#ifndef _TIME_T
#define _TIME_T
typedef __kernel_time_t time_t;
#endif
#ifndef _CLOCK_T
#define _CLOCK_T
typedef __kernel_clock_t clock_t;
#endif
#ifndef _CADDR_T
#define _CADDR_T
typedef __kernel_caddr_t caddr_t;
#endif
/* bsd */
typedef unsigned char u_char;
typedef unsigned short u_short;
typedef unsigned int u_int;
typedef unsigned long u_long;
/* sysv */
typedef unsigned char unchar;
typedef unsigned short ushort;
typedef unsigned int uint;
typedef unsigned long ulong;
#ifndef __BIT_TYPES_DEFINED__
#define __BIT_TYPES_DEFINED__
typedef __u8 u_int8_t;
typedef __s8 int8_t;
typedef __u16 u_int16_t;
typedef __s16 int16_t;
typedef __u32 u_int32_t;
typedef __s32 int32_t;
#endif /* !(__BIT_TYPES_DEFINED__) */
typedef __u8 uint8_t;
typedef __u16 uint16_t;
typedef __u32 uint32_t;
#if defined(__GNUC__)
typedef __u64 uint64_t;
typedef __u64 u_int64_t;
typedef __s64 int64_t;
#endif
/* this is a special 64bit data type that is 8-byte aligned */
#define aligned_u64 __u64 __attribute__((aligned(8)))
#define aligned_be64 __be64 __attribute__((aligned(8)))
#define aligned_le64 __le64 __attribute__((aligned(8)))
/**
* The type used for indexing onto a disc or disc partition.
*
* Linux always considers sectors to be 512 bytes long independently
* of the device's real block size.
*
* blkcnt_t is the type of the inode's block count.
*/
#ifdef CONFIG_LBDAF
typedef u64 sector_t;
typedef u64 blkcnt_t;
#else
typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
#endif
/*
* The type of an index into the pagecache. Use a #define so asm/types.h
* can override it.
*/
#ifndef pgoff_t
#define pgoff_t unsigned long
#endif
#endif /* __KERNEL__ */
/*
* Below are truly Linux-specific types that should never collide with
@@ -175,35 +34,5 @@ typedef __u64 __bitwise __be64;
typedef __u16 __bitwise __sum16;
typedef __u32 __bitwise __wsum;
#ifdef __KERNEL__
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 phys_addr_t;
#else
typedef u32 phys_addr_t;
#endif
typedef phys_addr_t resource_size_t;
typedef struct {
volatile int counter;
} atomic_t;
#ifdef CONFIG_64BIT
typedef struct {
volatile long counter;
} atomic64_t;
#endif
struct ustat {
__kernel_daddr_t f_tfree;
__kernel_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
};
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
@@ -450,14 +450,12 @@ struct xfrm_user_mapping {
__be16 new_sport;
};
#ifndef __KERNEL__
/* backwards compatibility for userspace */
#define XFRMGRP_ACQUIRE 1
#define XFRMGRP_EXPIRE 2
#define XFRMGRP_SA 4
#define XFRMGRP_POLICY 8
#define XFRMGRP_REPORT 0x20
#endif
enum xfrm_nlgroups {
XFRMNLGRP_NONE,
......