Commit 79d16385, authored Jul 08, 2008 by David S. Miller
netdev: Move atomic queue state bits into netdev_queue.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b19fa1fa
Showing 3 changed files with 51 additions and 26 deletions:
 include/linux/netdevice.h | +39 -16
 include/net/pkt_sched.h   |  +1  -1
 net/sched/sch_generic.c   | +11  -9
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -281,14 +281,12 @@ struct header_ops {
 
 enum netdev_state_t
 {
-	__LINK_STATE_XOFF=0,
 	__LINK_STATE_START,
 	__LINK_STATE_PRESENT,
 	__LINK_STATE_SCHED,
 	__LINK_STATE_NOCARRIER,
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
-	__LINK_STATE_QDISC_RUNNING,
 };
@@ -448,10 +446,17 @@ static inline void napi_synchronize(const struct napi_struct *n)
 # define napi_synchronize(n)	barrier()
 #endif
 
+enum netdev_queue_state_t
+{
+	__QUEUE_STATE_XOFF,
+	__QUEUE_STATE_QDISC_RUNNING,
+};
+
 struct netdev_queue {
 	spinlock_t		lock;
 	struct net_device	*dev;
 	struct Qdisc		*qdisc;
+	unsigned long		state;
 	struct sk_buff		*gso_skb;
 	spinlock_t		_xmit_lock;
 	int			xmit_lock_owner;
@@ -952,9 +957,7 @@ extern void __netif_schedule(struct netdev_queue *txq);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
-	struct net_device *dev = txq->dev;
-
-	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
+	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
 		__netif_schedule(txq);
 }
@@ -969,9 +972,14 @@ static inline void netif_schedule(struct net_device *dev)
  *
  *	Allow upper layers to call the device hard_start_xmit routine.
  */
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline void netif_start_queue(struct net_device *dev)
 {
-	clear_bit(__LINK_STATE_XOFF, &dev->state);
+	netif_tx_start_queue(&dev->tx_queue);
 }
 
 /**
@@ -981,16 +989,21 @@ static inline void netif_start_queue(struct net_device *dev)
  *	Allow upper layers to call the device hard_start_xmit routine.
  *	Used for flow control when transmit resources are available.
  */
-static inline void netif_wake_queue(struct net_device *dev)
+static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap()) {
-		clear_bit(__LINK_STATE_XOFF, &dev->state);
+		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 		return;
 	}
 #endif
-	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
-		__netif_schedule(&dev->tx_queue);
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+		__netif_schedule(dev_queue);
+}
+
+static inline void netif_wake_queue(struct net_device *dev)
+{
+	netif_tx_wake_queue(&dev->tx_queue);
 }
 
 /**
@@ -1000,9 +1013,14 @@ static inline void netif_wake_queue(struct net_device *dev)
  *	Stop upper layers calling the device hard_start_xmit routine.
  *	Used for flow control when transmit resources are unavailable.
  */
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline void netif_stop_queue(struct net_device *dev)
 {
-	set_bit(__LINK_STATE_XOFF, &dev->state);
+	netif_tx_stop_queue(&dev->tx_queue);
 }
 
 /**
@@ -1011,9 +1029,14 @@ static inline void netif_stop_queue(struct net_device *dev)
  *
  *	Test if transmit queue on device is currently unable to send.
  */
+static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline int netif_queue_stopped(const struct net_device *dev)
 {
-	return test_bit(__LINK_STATE_XOFF, &dev->state);
+	return netif_tx_queue_stopped(&dev->tx_queue);
 }
@@ -1043,7 +1066,7 @@ static inline int netif_running(const struct net_device *dev)
  */
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
-	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	clear_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
 }
 
 /**
@@ -1059,7 +1082,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 	if (netpoll_trap())
 		return;
 #endif
-	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	set_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
 }
 
 /**
@@ -1072,7 +1095,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 static inline int __netif_subqueue_stopped(const struct net_device *dev,
 					   u16 queue_index)
 {
-	return test_bit(__LINK_STATE_XOFF,
+	return test_bit(__QUEUE_STATE_XOFF,
 			&dev->egress_subqueue[queue_index].state);
 }
@@ -1095,7 +1118,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 	if (netpoll_trap())
 		return;
 #endif
-	if (test_and_clear_bit(__LINK_STATE_XOFF,
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF,
 			       &dev->egress_subqueue[queue_index].state))
 		__netif_schedule(&dev->tx_queue);
 }
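Note that the subqueue helpers above flip __QUEUE_STATE_XOFF in dev->egress_subqueue[queue_index].state, yet netif_wake_subqueue() still schedules the single dev->tx_queue: this patch only relocates the state bits, it does not yet give each subqueue its own scheduling.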
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -91,7 +91,7 @@ static inline void qdisc_run(struct netdev_queue *txq)
 	struct net_device *dev = txq->dev;
 
 	if (!netif_queue_stopped(dev) &&
-	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+	    !test_and_set_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state))
 		__qdisc_run(txq);
 }
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -121,9 +121,9 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 /*
  * NOTE: Called under queue->lock with locally disabled BH.
  *
- * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
- * device at a time. queue->lock serializes queue accesses for
- * this device AND txq->qdisc pointer itself.
+ * __QUEUE_STATE_QDISC_RUNNING guarantees only one CPU can process
+ * this queue at a time. queue->lock serializes queue accesses for
+ * this queue AND txq->qdisc pointer itself.
  *
  *  netif_tx_lock serializes accesses to device driver.
  *
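Condensing the protocol this comment describes: qdisc_run() (see the include/net/pkt_sched.h hunk above) claims the queue with test_and_set_bit(), and __qdisc_run() drops the claim with clear_bit() when it finishes. Roughly (an illustrative condensation, not code from the patch):

	if (!test_and_set_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state)) {
		/* this CPU is now the only runner of txq's qdisc */
		__qdisc_run(txq);	/* clears the bit before returning */
	}
	/* else: some other CPU is already draining this queue */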
@@ -206,7 +206,7 @@ void __qdisc_run(struct netdev_queue *txq)
 		}
 	}
 
-	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
+	clear_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state);
 }
 
 static void dev_watchdog(unsigned long arg)
@@ -605,9 +605,10 @@ static void dev_deactivate_queue(struct netdev_queue *dev_queue,
 
 void dev_deactivate(struct net_device *dev)
 {
+	struct netdev_queue *dev_queue = &dev->tx_queue;
 	int running;
 
-	dev_deactivate_queue(&dev->tx_queue, &noop_qdisc);
+	dev_deactivate_queue(dev_queue, &noop_qdisc);
 
 	dev_watchdog_down(dev);
@@ -616,16 +617,17 @@ void dev_deactivate(struct net_device *dev)
 	/* Wait for outstanding qdisc_run calls. */
 	do {
-		while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+		while (test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state))
 			yield();
 
 		/*
 		 * Double-check inside queue lock to ensure that all effects
 		 * of the queue run are visible when we return.
 		 */
-		spin_lock_bh(&dev->tx_queue.lock);
-		running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
-		spin_unlock_bh(&dev->tx_queue.lock);
+		spin_lock_bh(&dev_queue->lock);
+		running = test_bit(__QUEUE_STATE_QDISC_RUNNING,
+				   &dev_queue->state);
+		spin_unlock_bh(&dev_queue->lock);
 
 		/*
 		 * The running flag should never be set at this point because
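The double-check under dev_queue->lock matters because the busy-wait alone only observes the bit: __qdisc_run() executes under queue->lock (per the NOTE above), so reacquiring that same lock here pairs with the runner's release and guarantees that all effects of the final qdisc run are visible before dev_deactivate() returns.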