Commit 3a08cd52 authored by Matthew Wilcox

radix tree: Remove multiorder support

All users have now been converted to the XArray.  Removing the support
reduces code size and ensures new users will use the XArray instead.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 542980aa
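
For callers, the replacement API is the XArray. Below is a minimal sketch of what a converted insertion path could look like; the array and function names are illustrative only and are not part of this commit:

#include <linux/xarray.h>

static DEFINE_XARRAY(example_pages);	/* hypothetical example array */

static int example_insert(unsigned long index, void *entry)
{
	/*
	 * xa_insert() is the closest analogue of radix_tree_insert():
	 * it stores @entry only if @index is vacant and returns an
	 * error otherwise.  No preload step is needed; the gfp flags
	 * are passed directly.
	 */
	return xa_insert(&example_pages, index, entry, GFP_KERNEL);
}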
include/linux/radix-tree.h

@@ -96,7 +96,6 @@ static inline bool radix_tree_empty(const struct radix_tree_root *root)
  * @next_index:	one beyond the last index for this chunk
  * @tags:	bit-mask for tag-iterating
  * @node:	node that contains current slot
- * @shift:	shift for the node that holds our slots
  *
  * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
  * subinterval of slots contained within one radix tree leaf node.  It is
@@ -110,20 +109,8 @@ struct radix_tree_iter {
 	unsigned long	next_index;
 	unsigned long	tags;
 	struct radix_tree_node *node;
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-	unsigned int	shift;
-#endif
 };
 
-static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
-{
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-	return iter->shift;
-#else
-	return 0;
-#endif
-}
-
 /**
  * Radix-tree synchronization
  *
@@ -230,13 +217,8 @@ static inline int radix_tree_exception(void *arg)
 	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
 }
 
-int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
-			unsigned order, void *);
-static inline int radix_tree_insert(struct radix_tree_root *root,
-			unsigned long index, void *entry)
-{
-	return __radix_tree_insert(root, index, 0, entry);
-}
+int radix_tree_insert(struct radix_tree_root *, unsigned long index,
+			void *);
 void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
 			  struct radix_tree_node **nodep, void __rcu ***slotp);
 void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
@@ -384,7 +366,7 @@ void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
 static inline unsigned long
 __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
 {
-	return iter->index + (slots << iter_shift(iter));
+	return iter->index + slots;
 }
 
 /**
@@ -409,20 +391,8 @@ void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
 static __always_inline long
 radix_tree_chunk_size(struct radix_tree_iter *iter)
 {
-	return (iter->next_index - iter->index) >> iter_shift(iter);
+	return iter->next_index - iter->index;
 }
 
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-void __rcu **__radix_tree_next_slot(void __rcu **slot,
-				struct radix_tree_iter *iter, unsigned flags);
-#else
-/* Can't happen without sibling entries, but the compiler can't tell that */
-static inline void __rcu **__radix_tree_next_slot(void __rcu **slot,
-				struct radix_tree_iter *iter, unsigned flags)
-{
-	return slot;
-}
-#endif
-
 /**
  * radix_tree_next_slot - find next slot in chunk
@@ -482,8 +452,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
 	return NULL;
 
  found:
-	if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot))))
-		return __radix_tree_next_slot(slot, iter, flags);
 	return slot;
 }
...
lib/Kconfig

@@ -405,10 +405,6 @@ config XARRAY_MULTI
 	  Support entries which occupy multiple consecutive indices in the
 	  XArray.
 
-config RADIX_TREE_MULTIORDER
-	bool
-	select XARRAY_MULTI
-
 config ASSOCIATIVE_ARRAY
 	bool
 	help
...
This diff is collapsed.
mm/Kconfig

@@ -379,7 +379,7 @@ config TRANSPARENT_HUGEPAGE
 	bool "Transparent Hugepage Support"
 	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select COMPACTION
-	select RADIX_TREE_MULTIORDER
+	select XARRAY_MULTI
 	help
 	  Transparent Hugepages allows the kernel to use huge pages and
 	  huge tlb transparently to the applications whenever possible.
@@ -671,7 +671,7 @@ config ZONE_DEVICE
 	depends on MEMORY_HOTREMOVE
 	depends on SPARSEMEM_VMEMMAP
 	depends on ARCH_HAS_ZONE_DEVICE
-	select RADIX_TREE_MULTIORDER
+	select XARRAY_MULTI
 	help
 	  Device memory hotplug support allows for establishing pmem,
...
tools/testing/radix-tree/generated/autoconf.h

-#define CONFIG_RADIX_TREE_MULTIORDER 1
 #define CONFIG_XARRAY_MULTI 1
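
XARRAY_MULTI is the XArray's replacement for the multiorder facility: a single entry can be made to cover several consecutive indices, which is what the THP and ZONE_DEVICE page cache users rely on. A hedged sketch of that usage follows; the names are illustrative, and xa_store_range() requires CONFIG_XARRAY_MULTI:

#include <linux/xarray.h>

static DEFINE_XARRAY(example_cache);	/* hypothetical example array */

static int example_store_huge(unsigned long index, void *entry)
{
	/*
	 * Make every index in [index, index + 511] return @entry,
	 * matching the footprint of a 2MB huge page with 4kB base
	 * pages.  xa_store_range() returns NULL on success or an
	 * XA_ERROR(); xa_err() converts that into a plain errno.
	 */
	return xa_err(xa_store_range(&example_cache, index, index + 511,
					entry, GFP_KERNEL));
}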