Commit 201dacbb authored by David S. Miller

Merge branch 'net-checkpatch'

Tobin C. Harding says:

====================
Whitespace checkpatch fixes

This patch set fixes various whitespace checkpatch errors and warnings.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b668b903 f4563a75
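For illustration only (not part of the patch set), here is a minimal, self-contained C sketch of the whitespace patterns the hunks below move toward: an empty loop body on its own line, the opening brace of an array initialiser kept on the declaration line, and a blank line after local declarations. The identifiers are made up for the example, not taken from net/core/dev.c.

#include <stdio.h>

/* Opening brace of the initialiser stays on the declaration line. */
static const unsigned short lock_type[] = {
	1, 2, 3, 4,
};

static unsigned int bump_nonzero(unsigned int seq)
{
	/* An empty loop body goes on its own line. */
	while (++seq == 0)
		;
	return seq;
}

int main(void)
{
	unsigned int seq = 0;

	/* A blank line separates the declaration above from the code. */
	printf("%u %u\n", bump_nonzero(seq), (unsigned int)lock_type[0]);
	return 0;
}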
@@ -192,7 +192,8 @@ static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
while (++net->dev_base_seq == 0);
while (++net->dev_base_seq == 0)
;
}
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
@@ -274,8 +275,8 @@ EXPORT_PER_CPU_SYMBOL(softnet_data);
* register_netdevice() inits txq->_xmit_lock and sets lockdep class
* according to dev->type
*/
static const unsigned short netdev_lock_type[] =
{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
static const unsigned short netdev_lock_type[] = {
ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
@@ -291,8 +292,8 @@ static const unsigned short netdev_lock_type[] =
ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
static const char *const netdev_lock_name[] =
{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
static const char *const netdev_lock_name[] = {
"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
@@ -352,10 +353,11 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
#endif
/*******************************************************************************
*
* Protocol management and registration routines
*
*******************************************************************************/
Protocol management and registration routines
*******************************************************************************/
/*
* Add a protocol ID to the list. Now that the input handler is
@@ -538,10 +540,10 @@ void dev_remove_offload(struct packet_offload *po)
EXPORT_SYMBOL(dev_remove_offload);
/******************************************************************************
Device Boot-time Settings Routines
*******************************************************************************/
*
* Device Boot-time Settings Routines
*
******************************************************************************/
/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
@@ -663,10 +665,10 @@ int __init netdev_boot_setup(char *str)
__setup("netdev=", netdev_boot_setup);
/*******************************************************************************
Device Interface Subroutines
*******************************************************************************/
*
* Device Interface Subroutines
*
*******************************************************************************/
/**
* dev_get_iflink - get 'iflink' value of a interface
@@ -2496,6 +2498,7 @@ u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
if (dev->num_tc) {
u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
qoffset = dev->tc_to_txq[tc].offset;
qcount = dev->tc_to_txq[tc].count;
}
@@ -2717,9 +2720,11 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
int i;
if (!(dev->features & NETIF_F_HIGHDMA)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (PageHighMem(skb_frag_page(frag)))
return 1;
}
@@ -2733,6 +2738,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t addr = page_to_phys(skb_frag_page(frag));
if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
return 1;
}
@@ -3208,6 +3214,7 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
if (queue_index < 0 || skb->ooo_okay ||
queue_index >= dev->real_num_tx_queues) {
int new_index = get_xps_queue(dev, skb);
if (new_index < 0)
new_index = skb_tx_hash(dev, skb);
@@ -3237,6 +3244,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
if (dev->real_num_tx_queues != 1) {
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue)
queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
__netdev_pick_tx);
@@ -3325,16 +3333,16 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
}
/* The device has no queue. Common case for software devices:
loopback, all the sorts of tunnels...
* loopback, all the sorts of tunnels...
Really, it is unlikely that netif_tx_lock protection is necessary
here. (f.e. loopback and IP tunnels are clean ignoring statistics
counters.)
However, it is possible, that they rely on protection
made by us here.
* Really, it is unlikely that netif_tx_lock protection is necessary
* here. (f.e. loopback and IP tunnels are clean ignoring statistics
* counters.)
* However, it is possible, that they rely on protection
* made by us here.
Check this and shot the lock. It is not prone from deadlocks.
Either shot noqueue qdisc, it is even simpler 8)
* Check this and shot the lock. It is not prone from deadlocks.
*Either shot noqueue qdisc, it is even simpler 8)
*/
if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); /* ok because BHs are off */
@@ -3396,9 +3404,9 @@ int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
EXPORT_SYMBOL(dev_queue_xmit_accel);
/*=======================================================================
Receiver routines
=======================================================================*/
/*************************************************************************
* Receiver routines
*************************************************************************/
int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);
@@ -3766,6 +3774,7 @@ static int netif_rx_internal(struct sk_buff *skb)
#endif
{
unsigned int qtail;
ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
put_cpu();
}
@@ -3825,6 +3834,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
while (clist) {
struct sk_buff *skb = clist;
clist = clist->next;
WARN_ON(atomic_read(&skb->users));
@@ -5661,6 +5671,7 @@ static int netdev_adjacent_sysfs_add(struct net_device *dev,
struct list_head *dev_list)
{
char linkname[IFNAMSIZ+7];
sprintf(linkname, dev_list == &dev->adj_list.upper ?
"upper_%s" : "lower_%s", adj_dev->name);
return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
@@ -5671,6 +5682,7 @@ static void netdev_adjacent_sysfs_del(struct net_device *dev,
struct list_head *dev_list)
{
char linkname[IFNAMSIZ+7];
sprintf(linkname, dev_list == &dev->adj_list.upper ?
"upper_%s" : "lower_%s", name);
sysfs_remove_link(&(dev->dev.kobj), linkname);
@@ -5940,6 +5952,7 @@ void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev)
{
struct netdev_notifier_changeupper_info changeupper_info;
ASSERT_RTNL();
changeupper_info.upper_dev = upper_dev;
@@ -6358,8 +6371,8 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
}
/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
is important. Some (broken) drivers set IFF_PROMISC, when
IFF_ALLMULTI is requested not asking us and not reporting.
* is important. Some (broken) drivers set IFF_PROMISC, when
* IFF_ALLMULTI is requested not asking us and not reporting.
*/
if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
@@ -6657,6 +6670,7 @@ EXPORT_SYMBOL(dev_change_xdp_fd);
static int dev_new_index(struct net *net)
{
int ifindex = net->ifindex;
for (;;) {
if (++ifindex <= 0)
ifindex = 1;
@@ -6723,7 +6737,7 @@ static void rollback_registered_many(struct list_head *head)
/* Notify protocols, that we are about to destroy
this device. They should clean all the things.
* this device. They should clean all the things.
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
@@ -7070,6 +7084,7 @@ void netif_tx_stop_all_queues(struct net_device *dev)
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
netif_tx_stop_queue(txq);
}
}
@@ -7670,9 +7685,9 @@ EXPORT_SYMBOL(alloc_netdev_mqs);
* @dev: device
*
* This function does the last stage of destroying an allocated device
* interface. The reference to the device object is released.
* If this is the last reference then it will be freed.
* Must be called in process context.
* interface. The reference to the device object is released. If this
* is the last reference then it will be freed.Must be called in process
* context.
*/
void free_netdev(struct net_device *dev)
{
@@ -7854,11 +7869,11 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
dev_shutdown(dev);
/* Notify protocols, that we are about to destroy
this device. They should clean all the things.
Note that dev->reg_state stays at NETREG_REGISTERED.
This is wanted because this way 8021q and macvlan know
the device is just moving and can keep their slaves up.
* this device. They should clean all the things.
*
* Note that dev->reg_state stays at NETREG_REGISTERED.
* This is wanted because this way 8021q and macvlan know
* the device is just moving and can keep their slaves up.
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
rcu_barrier();