Commit 12414d6e authored by David S. Miller

[TCP]: Restart tw bucket scan when lock is dropped, noticed by Olof Johansson.

parent 26d5aa5a
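The bug: tcp_do_twkill_work() walked its death-row slot with a "safe" traversal macro, which caches the next node before each pass of the loop body. The body drops tw_death_lock around tcp_timewait_kill(), so another CPU can meanwhile kill and free that cached next bucket, leaving the scan holding a stale pointer. The fix restarts the scan from a freshly re-read list head each time the lock is reacquired. Below is a minimal userspace sketch of that restart pattern, assuming a plain singly linked list and a pthread mutex; the names (struct bucket, death_row, death_lock, reap_some(), expensive_kill()) are illustrative stand-ins, not the kernel's.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct bucket {
	int id;
	struct bucket *next;
};

static struct bucket *death_row;	/* list head, protected by death_lock */
static pthread_mutex_t death_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for tcp_timewait_kill(): too heavy to run under the lock. */
static void expensive_kill(struct bucket *b)
{
	printf("killed bucket %d\n", b->id);
	free(b);
}

/* Reap up to quota entries; mirrors the shape of tcp_do_twkill_work(). */
static unsigned int reap_some(unsigned int quota)
{
	struct bucket *b;
	unsigned int killed = 0;

	pthread_mutex_lock(&death_lock);
rescan:
	for (b = death_row; b != NULL; b = b->next) {
		death_row = b->next;		/* unlink under the lock */
		pthread_mutex_unlock(&death_lock);
		expensive_kill(b);		/* b is freed here */
		pthread_mutex_lock(&death_lock);
		if (++killed >= quota)
			break;
		/* b is freed and, with the lock dropped, any cached
		 * next pointer may be stale too; restart from a fresh
		 * re-read of the head instead of advancing.
		 */
		goto rescan;
	}
	pthread_mutex_unlock(&death_lock);
	return killed;
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		struct bucket *b = malloc(sizeof(*b));
		b->id = i;
		b->next = death_row;
		death_row = b;
	}
	printf("first pass reaped %u\n", reap_some(3));
	printf("second pass reaped %u\n", reap_some(10));
	return 0;
}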
...
@@ -263,7 +263,10 @@ static __inline__ int tw_del_dead_node(struct tcp_tw_bucket *tw)
 #define tw_for_each(tw, node, head) \
 	hlist_for_each_entry(tw, node, head, tw_node)
 
-#define tw_for_each_inmate(tw, node, safe, jail) \
+#define tw_for_each_inmate(tw, node, jail) \
+	hlist_for_each_entry(tw, node, jail, tw_death_node)
+
+#define tw_for_each_inmate_safe(tw, node, safe, jail) \
 	hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
 
 #define tcptw_sk(__sk) ((struct tcp_tw_bucket *)(__sk))
...
@@ -427,7 +427,7 @@ static u32 twkill_thread_slots;
 
 static int tcp_do_twkill_work(int slot, unsigned int quota)
 {
 	struct tcp_tw_bucket *tw;
-	struct hlist_node *node, *safe;
+	struct hlist_node *node;
 	unsigned int killed;
 	int ret;
...
@@ -439,8 +439,8 @@ static int tcp_do_twkill_work(int slot, unsigned int quota)
 	 */
 	killed = 0;
 	ret = 0;
-	tw_for_each_inmate(tw, node, safe,
-			   &tcp_tw_death_row[slot]) {
+rescan:
+	tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
 		__tw_del_dead_node(tw);
 		spin_unlock(&tw_death_lock);
 		tcp_timewait_kill(tw);
@@ -451,6 +451,14 @@ static int tcp_do_twkill_work(int slot, unsigned int quota)
 			ret = 1;
 			break;
 		}
+
+		/* While we dropped tw_death_lock, another cpu may have
+		 * killed off the next TW bucket in the list, therefore
+		 * do a fresh re-read of the hlist head node with the
+		 * lock reacquired.  We still use the hlist traversal
+		 * macro in order to get the prefetches.
+		 */
+		goto rescan;
 	}
 
 	tcp_tw_count -= killed;
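Note the control flow in the rewritten loop above: every pass through the body ends in either break or goto rescan, so the traversal macro's own advance step, which would follow the just-freed bucket's next pointer, is never reached. Per the added comment, the macro is retained only for the prefetches it issues on the freshly re-read head.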
...
@@ -637,7 +645,7 @@ void tcp_twcal_tick(unsigned long dummy)
 			struct hlist_node *node, *safe;
 			struct tcp_tw_bucket *tw;
 
-			tw_for_each_inmate(tw, node, safe,
+			tw_for_each_inmate_safe(tw, node, safe,
 					   &tcp_twcal_row[slot]) {
 				__tw_del_dead_node(tw);
 				tcp_timewait_kill(tw);
...
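The macro split in the first hunk is the other half of the change: tw_for_each_inmate() is now a plain hlist_for_each_entry() walk, suited to the restart-from-head discipline above, while the old cache-the-next-node behaviour lives on as tw_for_each_inmate_safe(). tcp_twcal_tick() is switched to the _safe variant, which it still needs because it unlinks buckets as it walks, and which remains correct there because, as the last hunk shows, it calls tcp_timewait_kill() without dropping the lock, so its cached next pointer cannot go stale.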