Commit ebebbbe9 authored by Hugh Dickins, committed by Linus Torvalds

swapfile: rearrange scan and swap_info

Before making functional changes, rearrange scan_swap_map() to simplify
subsequent diffs.  Actually, there is one functional change in there:
leave cluster_nr negative while scanning for a new cluster - resetting it
early increased the likelihood that when we have difficulty finding a free
cluster, another task may come in and try doing exactly the same - just a
waste of cpu.
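
For illustration, a minimal stand-alone sketch of that change (simplified and hypothetical: the struct and constant below only mimic the kernel's swap_info_struct and SWAPFILE_CLUSTER, and the real code follows in the diff):

	#define SWAPFILE_CLUSTER 256		/* illustrative; matches mm/swapfile.c */

	struct si_sketch {
		unsigned int cluster_nr;	/* allocations left in the current cluster */
	};

	/* Old arrangement: cluster_nr is reset before the (possibly long)
	 * search for a free cluster, so while one task searches with the
	 * lock dropped, others can run the counter down to zero again and
	 * start the very same search.
	 */
	static void old_scheme(struct si_sketch *si)
	{
		if (!si->cluster_nr) {
			si->cluster_nr = SWAPFILE_CLUSTER - 1;	/* reset early */
			/* ... search for a free cluster ... */
		}
		si->cluster_nr--;
	}

	/* New arrangement: the post-decrement leaves cluster_nr wrapped
	 * ("negative") for the whole search, so concurrent tasks never see
	 * it hit zero and do not duplicate the search.
	 */
	static void new_scheme(struct si_sketch *si)
	{
		if (!si->cluster_nr--) {
			/* ... search for a free cluster ... */
			si->cluster_nr = SWAPFILE_CLUSTER - 1;	/* reset when done */
		}
	}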

Before making functional changes, rearrange struct swap_info_struct
slightly: flags will be needed as an unsigned long (for wait_on_bit), next
is a good int to pair with prio, old_block_size is uninteresting so shift
it to the end.
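
On the flags type specifically: the kernel's bit operations, wait_on_bit included, act on unsigned long words, so a flags field handed to them cannot stay an unsigned int. A tiny illustrative fragment, not part of this patch (the struct and function names are made up):

	#include <linux/bitops.h>

	struct flags_example {
		unsigned long flags;		/* bit helpers take a pointer to unsigned long */
	};

	static void toggle_example_bit(struct flags_example *e, int bit)
	{
		set_bit(bit, &e->flags);	/* would not type-check against an unsigned int field */
		if (test_bit(bit, &e->flags))
			clear_bit(bit, &e->flags);
	}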

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 81e33971
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -133,14 +133,14 @@ enum {
  * The in-memory structure used to track swap areas.
  */
 struct swap_info_struct {
-	unsigned int flags;
+	unsigned long flags;
 	int prio;			/* swap priority */
+	int next;			/* next entry on swap list */
 	struct file *swap_file;
 	struct block_device *bdev;
 	struct list_head extent_list;
 	struct swap_extent *curr_swap_extent;
-	unsigned old_block_size;
-	unsigned short * swap_map;
+	unsigned short *swap_map;
 	unsigned int lowest_bit;
 	unsigned int highest_bit;
 	unsigned int cluster_next;
@@ -148,7 +148,7 @@ struct swap_info_struct {
 	unsigned int pages;
 	unsigned int max;
 	unsigned int inuse_pages;
-	int next;			/* next entry on swap list */
+	unsigned int old_block_size;
 };
 
 struct swap_list_t {
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -89,7 +89,8 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
 
 static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 {
-	unsigned long offset, last_in_cluster;
+	unsigned long offset;
+	unsigned long last_in_cluster;
 	int latency_ration = LATENCY_LIMIT;
 
 	/*
@@ -103,10 +104,13 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 	 */
 
 	si->flags += SWP_SCANNING;
-	if (unlikely(!si->cluster_nr)) {
-		si->cluster_nr = SWAPFILE_CLUSTER - 1;
-		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER)
-			goto lowest;
+	offset = si->cluster_next;
+
+	if (unlikely(!si->cluster_nr--)) {
+		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
+			si->cluster_nr = SWAPFILE_CLUSTER - 1;
+			goto checks;
+		}
 		spin_unlock(&swap_lock);
 
 		offset = si->lowest_bit;
@@ -118,43 +122,47 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 				last_in_cluster = offset + SWAPFILE_CLUSTER;
 			else if (offset == last_in_cluster) {
 				spin_lock(&swap_lock);
-				si->cluster_next = offset-SWAPFILE_CLUSTER+1;
-				goto cluster;
+				offset -= SWAPFILE_CLUSTER - 1;
+				si->cluster_next = offset;
+				si->cluster_nr = SWAPFILE_CLUSTER - 1;
+				goto checks;
 			}
 			if (unlikely(--latency_ration < 0)) {
 				cond_resched();
 				latency_ration = LATENCY_LIMIT;
 			}
 		}
+
+		offset = si->lowest_bit;
 		spin_lock(&swap_lock);
-		goto lowest;
+		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 	}
 
-	si->cluster_nr--;
-cluster:
-	offset = si->cluster_next;
-	if (offset > si->highest_bit)
-lowest:		offset = si->lowest_bit;
-checks:	if (!(si->flags & SWP_WRITEOK))
+checks:
+	if (!(si->flags & SWP_WRITEOK))
 		goto no_page;
 	if (!si->highest_bit)
 		goto no_page;
-	if (!si->swap_map[offset]) {
-		if (offset == si->lowest_bit)
-			si->lowest_bit++;
-		if (offset == si->highest_bit)
-			si->highest_bit--;
-		si->inuse_pages++;
-		if (si->inuse_pages == si->pages) {
-			si->lowest_bit = si->max;
-			si->highest_bit = 0;
-		}
-		si->swap_map[offset] = 1;
-		si->cluster_next = offset + 1;
-		si->flags -= SWP_SCANNING;
-		return offset;
-	}
+	if (offset > si->highest_bit)
+		offset = si->lowest_bit;
+	if (si->swap_map[offset])
+		goto scan;
+
+	if (offset == si->lowest_bit)
+		si->lowest_bit++;
+	if (offset == si->highest_bit)
+		si->highest_bit--;
+	si->inuse_pages++;
+	if (si->inuse_pages == si->pages) {
+		si->lowest_bit = si->max;
+		si->highest_bit = 0;
+	}
+	si->swap_map[offset] = 1;
+	si->cluster_next = offset + 1;
+	si->flags -= SWP_SCANNING;
+	return offset;
 
+scan:
 	spin_unlock(&swap_lock);
 	while (++offset <= si->highest_bit) {
 		if (!si->swap_map[offset]) {
@@ -167,7 +175,7 @@ checks:	if (!(si->flags & SWP_WRITEOK))
 		}
 	}
 	spin_lock(&swap_lock);
-	goto lowest;
+	goto checks;
 
 no_page:
 	si->flags -= SWP_SCANNING;