Commit b7ccc7f8 authored by Matthew Wilcox, committed by Linus Torvalds

mm: move lru union within struct page

Since the LRU is two words, this does not affect the double-word alignment
of SLUB's freelist.
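
The alignment claim can be sanity-checked with offsetof(): inserting the
two-word lru union between 'flags' and the union holding SLUB's
freelist/counters pair shifts that pair by exactly two words, so it still
starts on an even word boundary as cmpxchg_double requires. A minimal
userspace sketch of the idea (mock_page and its field layout are
illustrative only, not the kernel's real struct page):

	#include <stddef.h>
	#include <stdio.h>

	struct mock_page {
		unsigned long flags;		/* word 0 */
		struct {			/* the moved two-word union */
			void *next, *prev;	/* words 1-2 */
		} lru;
		void *slab_cache;		/* word 3 */
		void *freelist;			/* word 4: even offset */
		unsigned long counters;		/* word 5 */
	};

	/* cmpxchg_double needs the freelist/counters pair aligned to
	 * two words (16 bytes on 64-bit). */
	_Static_assert(offsetof(struct mock_page, freelist) %
		       (2 * sizeof(void *)) == 0,
		       "freelist lost its double-word alignment");

	int main(void)
	{
		printf("freelist at byte offset %zu\n",
		       offsetof(struct mock_page, freelist));
		return 0;
	}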

Link: http://lkml.kernel.org/r/20180518194519.3820-10-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fa3015b7
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -72,6 +72,57 @@ struct hmm;
 struct page {
 	unsigned long flags;		/* Atomic flags, some possibly
 					 * updated asynchronously */
+	/*
+	 * WARNING: bit 0 of the first word encode PageTail(). That means
+	 * the rest users of the storage space MUST NOT use the bit to
+	 * avoid collision and false-positive PageTail().
+	 */
+	union {
+		struct list_head lru;	/* Pageout list, eg. active_list
+					 * protected by zone_lru_lock !
+					 * Can be used as a generic list
+					 * by the page owner.
+					 */
+		struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
+					    * lru or handled by a slab
+					    * allocator, this points to the
+					    * hosting device page map.
+					    */
+		struct {		/* slub per cpu partial pages */
+			struct page *next;	/* Next partial slab */
+#ifdef CONFIG_64BIT
+			int pages;	/* Nr of partial slabs left */
+			int pobjects;	/* Approximate # of objects */
+#else
+			short int pages;
+			short int pobjects;
+#endif
+		};
+
+		struct rcu_head rcu_head;	/* Used by SLAB
+						 * when destroying via RCU
+						 */
+		/* Tail pages of compound page */
+		struct {
+			unsigned long compound_head; /* If bit zero is set */
+
+			/* First tail page only */
+			unsigned char compound_dtor;
+			unsigned char compound_order;
+			/* two/six bytes available here */
+		};
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
+		struct {
+			unsigned long __pad;	/* do not overlay pmd_huge_pte
+						 * with compound_head to avoid
+						 * possible bit 0 collision.
+						 */
+			pgtable_t pmd_huge_pte; /* protected by page->ptl */
+		};
+#endif
+	};
+
 	/* Three words (12/24 bytes) are available in this union. */
 	union {
 		struct {	/* Page cache and anonymous pages */
@@ -135,57 +186,6 @@ struct page {
 	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
 	atomic_t _refcount;
 
-	/*
-	 * WARNING: bit 0 of the first word encode PageTail(). That means
-	 * the rest users of the storage space MUST NOT use the bit to
-	 * avoid collision and false-positive PageTail().
-	 */
-	union {
-		struct list_head lru;	/* Pageout list, eg. active_list
-					 * protected by zone_lru_lock !
-					 * Can be used as a generic list
-					 * by the page owner.
-					 */
-		struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
-					    * lru or handled by a slab
-					    * allocator, this points to the
-					    * hosting device page map.
-					    */
-		struct {		/* slub per cpu partial pages */
-			struct page *next;	/* Next partial slab */
-#ifdef CONFIG_64BIT
-			int pages;	/* Nr of partial slabs left */
-			int pobjects;	/* Approximate # of objects */
-#else
-			short int pages;
-			short int pobjects;
-#endif
-		};
-
-		struct rcu_head rcu_head;	/* Used by SLAB
-						 * when destroying via RCU
-						 */
-		/* Tail pages of compound page */
-		struct {
-			unsigned long compound_head; /* If bit zero is set */
-
-			/* First tail page only */
-			unsigned char compound_dtor;
-			unsigned char compound_order;
-			/* two/six bytes available here */
-		};
-
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
-		struct {
-			unsigned long __pad;	/* do not overlay pmd_huge_pte
-						 * with compound_head to avoid
-						 * possible bit 0 collision.
-						 */
-			pgtable_t pmd_huge_pte; /* protected by page->ptl */
-		};
-#endif
-	};
-
 #ifdef CONFIG_MEMCG
 	struct mem_cgroup *mem_cgroup;
 #endif
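A note on the WARNING about bit 0 in the moved union: a tail page stores a
pointer to its head page in compound_head with bit 0 set (struct page is
word-aligned, so bit 0 of a real pointer is always clear), and every other
union member sharing that first word must keep bit 0 clear or an unrelated
page would test as a tail page. Paraphrased from include/linux/page-flags.h
of that era, the accessors decode the bit like this:

	static __always_inline int PageTail(struct page *page)
	{
		return READ_ONCE(page->compound_head) & 1;
	}

	static inline struct page *compound_head(struct page *page)
	{
		unsigned long head = READ_ONCE(page->compound_head);

		if (unlikely(head & 1))
			return (struct page *)(head - 1);
		return page;
	}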
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -52,11 +52,11 @@
  * and to synchronize major metadata changes to slab cache structures.
  *
  * The slab_lock is only used for debugging and on arches that do not
- * have the ability to do a cmpxchg_double. It only protects the second
- * double word in the page struct. Meaning
+ * have the ability to do a cmpxchg_double. It only protects:
  *	A. page->freelist	-> List of object free in a page
- *	B. page->counters	-> Counters of objects
- *	C. page->frozen		-> frozen state
+ *	B. page->inuse		-> Number of objects in use
+ *	C. page->objects	-> Number of objects in page
+ *	D. page->frozen		-> frozen state
  *
  * If a slab is frozen then it is exempt from list management. It is not
  * on any list. The processor that froze the slab is the one who can
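The renamed items B-D reflect how SLUB actually packs them: inuse, objects
and frozen are bitfields overlaid on the single counters word, so a
cmpxchg_double on the adjacent freelist/counters pair updates all four
protected items at once; on arches without cmpxchg_double, slab_lock covers
the same fields. A hedged sketch of the overlay (bitfield names and widths
follow struct page of that era; the slub_counters wrapper and the demo
itself are illustrative only):

	#include <stdio.h>

	union slub_counters {
		unsigned long counters;		/* updated as one word */
		struct {
			unsigned inuse:16;	/* B: objects in use */
			unsigned objects:15;	/* C: objects in the page */
			unsigned frozen:1;	/* D: frozen state */
		};
	};

	int main(void)
	{
		union slub_counters c = { .counters = 0 };

		c.inuse = 3;
		c.objects = 32;
		c.frozen = 1;
		/* All three fields live in one word, so replacing
		 * 'counters' (together with the neighbouring freelist
		 * pointer, via cmpxchg_double in SLUB) changes B, C and D
		 * atomically. */
		printf("counters word: %#lx\n", c.counters);
		return 0;
	}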