Commit cf393195 authored by Linus Torvalds

Merge branch 'idr-4.11' of git://git.infradead.org/users/willy/linux-dax

Pull IDR rewrite from Matthew Wilcox:
 "The most significant part of the following is the patch to rewrite the
  IDR & IDA to be clients of the radix tree. But there's much more,
  including an enhancement of the IDA to be significantly more space
  efficient, an IDR & IDA test suite, some improvements to the IDR API
  (and driver changes to take advantage of those improvements), several
  improvements to the radix tree test suite and RCU annotations.

  The IDR & IDA rewrite had a good spin in linux-next and Andrew's tree
  for most of the last cycle. Coupled with the IDR test suite, I feel
  pretty confident that any remaining bugs are quite hard to hit. 0-day
  did a great job of watching my git tree and pointing out problems; as
  it hit them, I added new test-cases to be sure not to be caught the
  same way twice"

Willy goes on to expand a bit on the IDR rewrite rationale:
 "The radix tree and the IDR use very similar data structures.

  Merging the two codebases lets us share the memory allocation pools,
  and results in a net deletion of 500 lines of code. It also opens up
  the possibility of exposing more of the features of the radix tree to
  users of the IDR (and I have some interesting patches along those
  lines waiting for 4.12)

  It also shrinks the size of the 'struct idr' from 40 bytes to 24 which
  will shrink a fair few data structures that embed an IDR"
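
The 40-to-24-byte shrink can be read straight off the include/linux/idr.h diff below. The old 'struct idr' carried a lookup hint, a top pointer, layer bookkeeping, a free list and its own spinlock; the new one is nothing but a radix tree root plus the cursor used by idr_alloc_cyclic():

	struct idr {
		struct radix_tree_root	idr_rt;
		unsigned int		idr_next;
	};

On 64-bit, struct radix_tree_root is a gfp_t plus a node pointer (16 bytes with padding), which together with idr_next and trailing padding gives the 24 bytes quoted above.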

* 'idr-4.11' of git://git.infradead.org/users/willy/linux-dax: (32 commits)
  radix tree test suite: Add config option for map shift
  idr: Add missing __rcu annotations
  radix-tree: Fix __rcu annotations
  radix-tree: Add rcu_dereference and rcu_assign_pointer calls
  radix tree test suite: Run iteration tests for longer
  radix tree test suite: Fix split/join memory leaks
  radix tree test suite: Fix leaks in regression2.c
  radix tree test suite: Fix leaky tests
  radix tree test suite: Enable address sanitizer
  radix_tree_iter_resume: Fix out of bounds error
  radix-tree: Store a pointer to the root in each node
  radix-tree: Chain preallocated nodes through ->parent
  radix tree test suite: Dial down verbosity with -v
  radix tree test suite: Introduce kmalloc_verbose
  idr: Return the deleted entry from idr_remove
  radix tree test suite: Build separate binaries for some tests
  ida: Use exceptional entries for small IDAs
  ida: Move ida_bitmap to a percpu variable
  Reimplement IDR and IDA using the radix tree
  radix-tree: Add radix_tree_iter_delete
  ...
parents 5ecc5ac2 c6ce3e2f
@@ -1980,13 +1980,12 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 	card->lbfqc = ns_stat_lfbqc_get(stat);
 
 	id = le32_to_cpu(rsqe->buffer_handle);
-	skb = idr_find(&card->idr, id);
+	skb = idr_remove(&card->idr, id);
 	if (!skb) {
 		RXPRINTK(KERN_ERR
-			 "nicstar%d: idr_find() failed!\n", card->index);
+			 "nicstar%d: skb not found!\n", card->index);
 		return;
 	}
-	idr_remove(&card->idr, id);
 	dma_sync_single_for_cpu(&card->pcidev->dev,
 				NS_PRV_DMA(skb),
 				(NS_PRV_BUFTYPE(skb) == BUF_SM
......
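
All seven driver hunks in this merge apply the same transformation, enabled by the "idr: Return the deleted entry from idr_remove" commit listed above: a two-step idr_find()/idr_remove() sequence collapses into a single tree walk. A minimal sketch of the pattern, with my_idr, my_lock and free_obj() as hypothetical stand-ins for each driver's own names:

	/* Before: two lookups under the lock */
	spin_lock(&my_lock);
	obj = idr_find(&my_idr, id);
	if (obj)
		idr_remove(&my_idr, id);
	spin_unlock(&my_lock);

	/* After: idr_remove() hands back the entry it removed (or NULL) */
	spin_lock(&my_lock);
	obj = idr_remove(&my_idr, id);
	spin_unlock(&my_lock);
	if (obj)
		free_obj(obj);
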
@@ -2915,12 +2915,10 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	idr_remove(&connection->peer_devices, vnr);
 out_idr_remove_from_resource:
 	for_each_connection(connection, resource) {
-		peer_device = idr_find(&connection->peer_devices, vnr);
-		if (peer_device) {
-			idr_remove(&connection->peer_devices, vnr);
+		peer_device = idr_remove(&connection->peer_devices, vnr);
+		if (peer_device)
 			kref_put(&connection->kref, drbd_destroy_connection);
-		}
 	}
 	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
 		list_del(&peer_device->peer_devices);
 		kfree(peer_device);
......
@@ -1307,8 +1307,7 @@ static void iso_resource_work(struct work_struct *work)
 	 */
 	if (r->todo == ISO_RES_REALLOC && !success &&
 	    !client->in_shutdown &&
-	    idr_find(&client->resource_idr, r->resource.handle)) {
-		idr_remove(&client->resource_idr, r->resource.handle);
+	    idr_remove(&client->resource_idr, r->resource.handle)) {
 		client_put(client);
 		free = true;
 	}
......
@@ -70,10 +70,10 @@ static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
 	struct amdgpu_bo_list *list;
 
 	mutex_lock(&fpriv->bo_list_lock);
-	list = idr_find(&fpriv->bo_list_handles, id);
+	list = idr_remove(&fpriv->bo_list_handles, id);
 	if (list) {
+		/* Another user may have a reference to this list still */
 		mutex_lock(&list->lock);
-		idr_remove(&fpriv->bo_list_handles, id);
 		mutex_unlock(&list->lock);
 		amdgpu_bo_list_free(list);
 	}
......
@@ -135,15 +135,11 @@ static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
 	struct amdgpu_ctx *ctx;
 
 	mutex_lock(&mgr->lock);
-	ctx = idr_find(&mgr->ctx_handles, id);
-	if (ctx) {
-		idr_remove(&mgr->ctx_handles, id);
+	ctx = idr_remove(&mgr->ctx_handles, id);
+	if (ctx)
 		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
-		mutex_unlock(&mgr->lock);
-		return 0;
-	}
 	mutex_unlock(&mgr->lock);
-	return -EINVAL;
+	return ctx ? 0 : -EINVAL;
 }
 
 static int amdgpu_ctx_query(struct amdgpu_device *adev,
......
@@ -346,9 +346,7 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
 		return;
 
 	spin_lock_irqsave(&priv->ack_status_lock, flags);
-	ack_skb = idr_find(&priv->ack_status_frames, tx_status->tx_token_id);
-	if (ack_skb)
-		idr_remove(&priv->ack_status_frames, tx_status->tx_token_id);
+	ack_skb = idr_remove(&priv->ack_status_frames, tx_status->tx_token_id);
 	spin_unlock_irqrestore(&priv->ack_status_lock, flags);
 
 	if (ack_skb) {
......
@@ -642,9 +642,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
 		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
 
 		spin_lock(&udev->commands_lock);
-		cmd = idr_find(&udev->commands, entry->hdr.cmd_id);
-		if (cmd)
-			idr_remove(&udev->commands, cmd->cmd_id);
+		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
 		spin_unlock(&udev->commands_lock);
 
 		if (!cmd) {
......
...@@ -12,47 +12,29 @@ ...@@ -12,47 +12,29 @@
#ifndef __IDR_H__ #ifndef __IDR_H__
#define __IDR_H__ #define __IDR_H__
#include <linux/types.h> #include <linux/radix-tree.h>
#include <linux/bitops.h> #include <linux/gfp.h>
#include <linux/init.h> #include <linux/percpu.h>
#include <linux/rcupdate.h>
struct idr {
struct radix_tree_root idr_rt;
unsigned int idr_next;
};
/* /*
* Using 6 bits at each layer allows us to allocate 7 layers out of each page. * The IDR API does not expose the tagging functionality of the radix tree
* 8 bits only gave us 3 layers out of every pair of pages, which is less * to users. Use tag 0 to track whether a node has free space below it.
* efficient except for trees with a largest element between 192-255 inclusive.
*/ */
#define IDR_BITS 6 #define IDR_FREE 0
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)
struct idr_layer {
int prefix; /* the ID prefix of this idr_layer */
int layer; /* distance from leaf */
struct idr_layer __rcu *ary[1<<IDR_BITS];
int count; /* When zero, we can release it */
union {
/* A zero bit means "space here" */
DECLARE_BITMAP(bitmap, IDR_SIZE);
struct rcu_head rcu_head;
};
};
struct idr { /* Set the IDR flag and the IDR_FREE tag */
struct idr_layer __rcu *hint; /* the last layer allocated from */ #define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT))
struct idr_layer __rcu *top;
int layers; /* only valid w/o concurrent changes */
int cur; /* current pos for cyclic allocation */
spinlock_t lock;
int id_free_cnt;
struct idr_layer *id_free;
};
#define IDR_INIT(name) \ #define IDR_INIT \
{ \ { \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \
} }
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) #define DEFINE_IDR(name) struct idr name = IDR_INIT
/** /**
* idr_get_cursor - Return the current position of the cyclic allocator * idr_get_cursor - Return the current position of the cyclic allocator
...@@ -62,9 +44,9 @@ struct idr { ...@@ -62,9 +44,9 @@ struct idr {
* idr_alloc_cyclic() if it is free (otherwise the search will start from * idr_alloc_cyclic() if it is free (otherwise the search will start from
* this position). * this position).
*/ */
static inline unsigned int idr_get_cursor(struct idr *idr) static inline unsigned int idr_get_cursor(const struct idr *idr)
{ {
return READ_ONCE(idr->cur); return READ_ONCE(idr->idr_next);
} }
/** /**
...@@ -77,7 +59,7 @@ static inline unsigned int idr_get_cursor(struct idr *idr) ...@@ -77,7 +59,7 @@ static inline unsigned int idr_get_cursor(struct idr *idr)
*/ */
static inline void idr_set_cursor(struct idr *idr, unsigned int val) static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{ {
WRITE_ONCE(idr->cur, val); WRITE_ONCE(idr->idr_next, val);
} }
/** /**
...@@ -97,22 +79,31 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val) ...@@ -97,22 +79,31 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
* period). * period).
*/ */
/*
* This is what we export.
*/
void *idr_find_slowpath(struct idr *idp, int id);
void idr_preload(gfp_t gfp_mask); void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t);
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask); int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
int idr_for_each(struct idr *idp, int idr_for_each(const struct idr *,
int (*fn)(int id, void *p, void *data), void *data); int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid); void *idr_get_next(struct idr *, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id); void *idr_replace(struct idr *, void *, int id);
void idr_remove(struct idr *idp, int id); void idr_destroy(struct idr *);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp); static inline void *idr_remove(struct idr *idr, int id)
bool idr_is_empty(struct idr *idp); {
return radix_tree_delete_item(&idr->idr_rt, id, NULL);
}
static inline void idr_init(struct idr *idr)
{
INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
idr->idr_next = 0;
}
static inline bool idr_is_empty(const struct idr *idr)
{
return radix_tree_empty(&idr->idr_rt) &&
radix_tree_tagged(&idr->idr_rt, IDR_FREE);
}
/** /**
* idr_preload_end - end preload section started with idr_preload() * idr_preload_end - end preload section started with idr_preload()
...@@ -137,19 +128,14 @@ static inline void idr_preload_end(void) ...@@ -137,19 +128,14 @@ static inline void idr_preload_end(void)
* This function can be called under rcu_read_lock(), given that the leaf * This function can be called under rcu_read_lock(), given that the leaf
* pointers lifetimes are correctly managed. * pointers lifetimes are correctly managed.
*/ */
static inline void *idr_find(struct idr *idr, int id) static inline void *idr_find(const struct idr *idr, int id)
{ {
struct idr_layer *hint = rcu_dereference_raw(idr->hint); return radix_tree_lookup(&idr->idr_rt, id);
if (hint && (id & ~IDR_MASK) == hint->prefix)
return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
return idr_find_slowpath(idr, id);
} }
/** /**
* idr_for_each_entry - iterate over an idr's elements of a given type * idr_for_each_entry - iterate over an idr's elements of a given type
* @idp: idr handle * @idr: idr handle
* @entry: the type * to use as cursor * @entry: the type * to use as cursor
* @id: id entry's key * @id: id entry's key
* *
...@@ -157,57 +143,60 @@ static inline void *idr_find(struct idr *idr, int id) ...@@ -157,57 +143,60 @@ static inline void *idr_find(struct idr *idr, int id)
* after normal termination @entry is left with the value NULL. This * is convenient for a "not found" value.
* is convenient for a "not found" value. * is convenient for a "not found" value.
*/ */
#define idr_for_each_entry(idp, entry, id) \ #define idr_for_each_entry(idr, entry, id) \
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
/** /**
* idr_for_each_entry - continue iteration over an idr's elements of a given type * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
* @idp: idr handle * @idr: idr handle
* @entry: the type * to use as cursor * @entry: the type * to use as cursor
* @id: id entry's key * @id: id entry's key
* *
* Continue to iterate over list of given type, continuing after * Continue to iterate over list of given type, continuing after
* the current position. * the current position.
*/ */
#define idr_for_each_entry_continue(idp, entry, id) \ #define idr_for_each_entry_continue(idr, entry, id) \
for ((entry) = idr_get_next((idp), &(id)); \ for ((entry) = idr_get_next((idr), &(id)); \
entry; \ entry; \
++id, (entry) = idr_get_next((idp), &(id))) ++id, (entry) = idr_get_next((idr), &(id)))
/* /*
* IDA - IDR based id allocator, use when translation from id to * IDA - IDR based id allocator, use when translation from id to
* pointer isn't necessary. * pointer isn't necessary.
*
* IDA_BITMAP_LONGS is calculated to be one less to accommodate
* ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
*/ */
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1) #define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) #define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8)
struct ida_bitmap { struct ida_bitmap {
long nr_busy;
unsigned long bitmap[IDA_BITMAP_LONGS]; unsigned long bitmap[IDA_BITMAP_LONGS];
}; };
DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);
struct ida { struct ida {
struct idr idr; struct radix_tree_root ida_rt;
struct ida_bitmap *free_bitmap;
}; };
#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, } #define IDA_INIT { \
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \
}
#define DEFINE_IDA(name) struct ida name = IDA_INIT
int ida_pre_get(struct ida *ida, gfp_t gfp_mask); int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id); void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida); void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
gfp_t gfp_mask); gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id); void ida_simple_remove(struct ida *ida, unsigned int id);
static inline void ida_init(struct ida *ida)
{
INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
}
/** /**
* ida_get_new - allocate new ID * ida_get_new - allocate new ID
* @ida: idr handle * @ida: idr handle
...@@ -220,11 +209,8 @@ static inline int ida_get_new(struct ida *ida, int *p_id) ...@@ -220,11 +209,8 @@ static inline int ida_get_new(struct ida *ida, int *p_id)
return ida_get_new_above(ida, 0, p_id); return ida_get_new_above(ida, 0, p_id);
} }
static inline bool ida_is_empty(struct ida *ida) static inline bool ida_is_empty(const struct ida *ida)
{ {
return idr_is_empty(&ida->idr); return radix_tree_empty(&ida->ida_rt);
} }
void __init idr_init_cache(void);
#endif /* __IDR_H__ */ #endif /* __IDR_H__ */
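
Taken together, the header above leaves the user-facing calling convention intact; only the backing data structure changes. A minimal usage sketch of the surviving API, assuming a caller-provided lock (the IDR does no internal locking) and a hypothetical struct my_obj:

	static DEFINE_IDR(my_idr);		/* note: IDR_INIT no longer takes a name */
	static DEFINE_SPINLOCK(my_lock);

	int my_obj_register(struct my_obj *obj)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* preallocate so the locked allocation cannot sleep */
		spin_lock(&my_lock);
		id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);	/* end <= 0: no upper bound */
		spin_unlock(&my_lock);
		idr_preload_end();

		return id;			/* >= 0 on success, -ENOMEM/-ENOSPC on failure */
	}
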
...@@ -22,11 +22,13 @@ ...@@ -22,11 +22,13 @@
#define _LINUX_RADIX_TREE_H #define _LINUX_RADIX_TREE_H
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/list.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
/* /*
* The bottom two bits of the slot determine how the remaining bits in the * The bottom two bits of the slot determine how the remaining bits in the
...@@ -94,7 +96,7 @@ struct radix_tree_node { ...@@ -94,7 +96,7 @@ struct radix_tree_node {
unsigned char count; /* Total entry count */ unsigned char count; /* Total entry count */
unsigned char exceptional; /* Exceptional entry count */ unsigned char exceptional; /* Exceptional entry count */
struct radix_tree_node *parent; /* Used when ascending tree */ struct radix_tree_node *parent; /* Used when ascending tree */
void *private_data; /* For tree user */ struct radix_tree_root *root; /* The tree we belong to */
union { union {
struct list_head private_list; /* For tree user */ struct list_head private_list; /* For tree user */
struct rcu_head rcu_head; /* Used when freeing node */ struct rcu_head rcu_head; /* Used when freeing node */
...@@ -103,7 +105,10 @@ struct radix_tree_node { ...@@ -103,7 +105,10 @@ struct radix_tree_node {
unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
}; };
/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ /* The top bits of gfp_mask are used to store the root tags and the IDR flag */
#define ROOT_IS_IDR ((__force gfp_t)(1 << __GFP_BITS_SHIFT))
#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1)
struct radix_tree_root { struct radix_tree_root {
gfp_t gfp_mask; gfp_t gfp_mask;
struct radix_tree_node __rcu *rnode; struct radix_tree_node __rcu *rnode;
...@@ -123,7 +128,7 @@ do { \ ...@@ -123,7 +128,7 @@ do { \
(root)->rnode = NULL; \ (root)->rnode = NULL; \
} while (0) } while (0)
static inline bool radix_tree_empty(struct radix_tree_root *root) static inline bool radix_tree_empty(const struct radix_tree_root *root)
{ {
return root->rnode == NULL; return root->rnode == NULL;
} }
...@@ -217,9 +222,7 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter) ...@@ -217,9 +222,7 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
/** /**
* radix_tree_deref_slot - dereference a slot * radix_tree_deref_slot - dereference a slot
* @pslot: pointer to slot, returned by radix_tree_lookup_slot * @slot: slot pointer, returned by radix_tree_lookup_slot
* Returns: item that was stored in that slot with any direct pointer flag
* removed.
* *
* For use with radix_tree_lookup_slot(). Caller must hold tree at least read * For use with radix_tree_lookup_slot(). Caller must hold tree at least read
* locked across slot lookup and dereference. Not required if write lock is * locked across slot lookup and dereference. Not required if write lock is
...@@ -227,26 +230,27 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter) ...@@ -227,26 +230,27 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
* *
* radix_tree_deref_retry must be used to confirm validity of the pointer if * radix_tree_deref_retry must be used to confirm validity of the pointer if
* only the read lock is held. * only the read lock is held.
*
* Return: entry stored in that slot.
*/ */
static inline void *radix_tree_deref_slot(void **pslot) static inline void *radix_tree_deref_slot(void __rcu **slot)
{ {
return rcu_dereference(*pslot); return rcu_dereference(*slot);
} }
/** /**
* radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held * radix_tree_deref_slot_protected - dereference a slot with tree lock held
* @pslot: pointer to slot, returned by radix_tree_lookup_slot * @slot: slot pointer, returned by radix_tree_lookup_slot
* Returns: item that was stored in that slot with any direct pointer flag *
* removed. * Similar to radix_tree_deref_slot. The caller does not hold the RCU read
* * lock but it must hold the tree lock to prevent parallel updates.
* Similar to radix_tree_deref_slot but only used during migration when a pages *
* mapping is being moved. The caller does not hold the RCU read lock but it * Return: entry stored in that slot.
* must hold the tree lock to prevent parallel updates.
*/ */
static inline void *radix_tree_deref_slot_protected(void **pslot, static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
spinlock_t *treelock) spinlock_t *treelock)
{ {
return rcu_dereference_protected(*pslot, lockdep_is_held(treelock)); return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
} }
/** /**
...@@ -282,9 +286,9 @@ static inline int radix_tree_exception(void *arg) ...@@ -282,9 +286,9 @@ static inline int radix_tree_exception(void *arg)
return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
} }
int __radix_tree_create(struct radix_tree_root *root, unsigned long index, int __radix_tree_create(struct radix_tree_root *, unsigned long index,
unsigned order, struct radix_tree_node **nodep, unsigned order, struct radix_tree_node **nodep,
void ***slotp); void __rcu ***slotp);
int __radix_tree_insert(struct radix_tree_root *, unsigned long index, int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
unsigned order, void *); unsigned order, void *);
static inline int radix_tree_insert(struct radix_tree_root *root, static inline int radix_tree_insert(struct radix_tree_root *root,
...@@ -292,55 +296,56 @@ static inline int radix_tree_insert(struct radix_tree_root *root, ...@@ -292,55 +296,56 @@ static inline int radix_tree_insert(struct radix_tree_root *root,
{ {
return __radix_tree_insert(root, index, 0, entry); return __radix_tree_insert(root, index, 0, entry);
} }
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
struct radix_tree_node **nodep, void ***slotp); struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long); void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
unsigned long index);
typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *); typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *);
void __radix_tree_replace(struct radix_tree_root *root, void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
struct radix_tree_node *node, void __rcu **slot, void *entry,
void **slot, void *item,
radix_tree_update_node_t update_node, void *private); radix_tree_update_node_t update_node, void *private);
void radix_tree_iter_replace(struct radix_tree_root *, void radix_tree_iter_replace(struct radix_tree_root *,
const struct radix_tree_iter *, void **slot, void *item); const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *root, void radix_tree_replace_slot(struct radix_tree_root *,
void **slot, void *item); void __rcu **slot, void *entry);
void __radix_tree_delete_node(struct radix_tree_root *root, void __radix_tree_delete_node(struct radix_tree_root *,
struct radix_tree_node *node, struct radix_tree_node *,
radix_tree_update_node_t update_node, radix_tree_update_node_t update_node,
void *private); void *private);
void radix_tree_iter_delete(struct radix_tree_root *,
struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long); void *radix_tree_delete(struct radix_tree_root *, unsigned long);
void radix_tree_clear_tags(struct radix_tree_root *root, void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *,
struct radix_tree_node *node, void __rcu **slot);
void **slot); unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
void **results, unsigned long first_index, void **results, unsigned long first_index,
unsigned int max_items); unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
void ***results, unsigned long *indices, void __rcu ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items); unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask); int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
void radix_tree_init(void); void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root, void *radix_tree_tag_set(struct radix_tree_root *,
unsigned long index, unsigned int tag); unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *root, void *radix_tree_tag_clear(struct radix_tree_root *,
unsigned long index, unsigned int tag); unsigned long index, unsigned int tag);
int radix_tree_tag_get(struct radix_tree_root *root, int radix_tree_tag_get(const struct radix_tree_root *,
unsigned long index, unsigned int tag); unsigned long index, unsigned int tag);
void radix_tree_iter_tag_set(struct radix_tree_root *root, void radix_tree_iter_tag_set(struct radix_tree_root *,
const struct radix_tree_iter *iter, unsigned int tag); const struct radix_tree_iter *iter, unsigned int tag);
unsigned int void radix_tree_iter_tag_clear(struct radix_tree_root *,
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, const struct radix_tree_iter *iter, unsigned int tag);
unsigned long first_index, unsigned int max_items, unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
unsigned int tag); void **results, unsigned long first_index,
unsigned int unsigned int max_items, unsigned int tag);
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
unsigned long first_index, unsigned int max_items, void __rcu ***results, unsigned long first_index,
unsigned int tag); unsigned int max_items, unsigned int tag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
static inline void radix_tree_preload_end(void) static inline void radix_tree_preload_end(void)
{ {
...@@ -352,10 +357,14 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index, ...@@ -352,10 +357,14 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index,
unsigned new_order); unsigned new_order);
int radix_tree_join(struct radix_tree_root *, unsigned long index, int radix_tree_join(struct radix_tree_root *, unsigned long index,
unsigned new_order, void *); unsigned new_order, void *);
void __rcu **idr_get_free(struct radix_tree_root *, struct radix_tree_iter *,
gfp_t, int end);
#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ enum {
#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */
#define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */
RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */
};
/** /**
* radix_tree_iter_init - initialize radix tree iterator * radix_tree_iter_init - initialize radix tree iterator
...@@ -364,7 +373,7 @@ int radix_tree_join(struct radix_tree_root *, unsigned long index, ...@@ -364,7 +373,7 @@ int radix_tree_join(struct radix_tree_root *, unsigned long index,
* @start: iteration starting index * @start: iteration starting index
* Returns: NULL * Returns: NULL
*/ */
static __always_inline void ** static __always_inline void __rcu **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{ {
/* /*
...@@ -393,9 +402,45 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) ...@@ -393,9 +402,45 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
* Also it fills @iter with data about chunk: position in the tree (index), * Also it fills @iter with data about chunk: position in the tree (index),
* its end (next_index), and constructs a bit mask for tagged iterating (tags). * its end (next_index), and constructs a bit mask for tagged iterating (tags).
*/ */
void **radix_tree_next_chunk(struct radix_tree_root *root, void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
struct radix_tree_iter *iter, unsigned flags); struct radix_tree_iter *iter, unsigned flags);
/**
* radix_tree_iter_lookup - look up an index in the radix tree
* @root: radix tree root
* @iter: iterator state
* @index: key to look up
*
* If @index is present in the radix tree, this function returns the slot
* containing it and updates @iter to describe the entry. If @index is not
* present, it returns NULL.
*/
static inline void __rcu **
radix_tree_iter_lookup(const struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned long index)
{
radix_tree_iter_init(iter, index);
return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
}
/**
* radix_tree_iter_find - find a present entry
* @root: radix tree root
* @iter: iterator state
* @index: start location
*
* This function returns the slot containing the entry with the lowest index
* which is at least @index. If @index is larger than any present entry, this
* function returns NULL. The @iter is updated to describe the entry found.
*/
static inline void __rcu **
radix_tree_iter_find(const struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned long index)
{
radix_tree_iter_init(iter, index);
return radix_tree_next_chunk(root, iter, 0);
}
/** /**
* radix_tree_iter_retry - retry this chunk of the iteration * radix_tree_iter_retry - retry this chunk of the iteration
* @iter: iterator state * @iter: iterator state
...@@ -406,7 +451,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, ...@@ -406,7 +451,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
* and continue the iteration. * and continue the iteration.
*/ */
static inline __must_check static inline __must_check
void **radix_tree_iter_retry(struct radix_tree_iter *iter) void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
{ {
iter->next_index = iter->index; iter->next_index = iter->index;
iter->tags = 0; iter->tags = 0;
...@@ -429,7 +474,7 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) ...@@ -429,7 +474,7 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
* have been invalidated by an insertion or deletion. Call this function * have been invalidated by an insertion or deletion. Call this function
* before releasing the lock to continue the iteration from the next index. * before releasing the lock to continue the iteration from the next index.
*/ */
void **__must_check radix_tree_iter_resume(void **slot, void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
struct radix_tree_iter *iter); struct radix_tree_iter *iter);
/** /**
...@@ -445,11 +490,11 @@ radix_tree_chunk_size(struct radix_tree_iter *iter) ...@@ -445,11 +490,11 @@ radix_tree_chunk_size(struct radix_tree_iter *iter)
} }
#ifdef CONFIG_RADIX_TREE_MULTIORDER #ifdef CONFIG_RADIX_TREE_MULTIORDER
void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, void __rcu **__radix_tree_next_slot(void __rcu **slot,
unsigned flags); struct radix_tree_iter *iter, unsigned flags);
#else #else
/* Can't happen without sibling entries, but the compiler can't tell that */ /* Can't happen without sibling entries, but the compiler can't tell that */
static inline void ** __radix_tree_next_slot(void **slot, static inline void __rcu **__radix_tree_next_slot(void __rcu **slot,
struct radix_tree_iter *iter, unsigned flags) struct radix_tree_iter *iter, unsigned flags)
{ {
return slot; return slot;
...@@ -475,8 +520,8 @@ static inline void ** __radix_tree_next_slot(void **slot, ...@@ -475,8 +520,8 @@ static inline void ** __radix_tree_next_slot(void **slot,
* b) we are doing non-tagged iteration, and iter->index and iter->next_index * b) we are doing non-tagged iteration, and iter->index and iter->next_index
* have been set up so that radix_tree_chunk_size() returns 1 or 0. * have been set up so that radix_tree_chunk_size() returns 1 or 0.
*/ */
static __always_inline void ** static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) struct radix_tree_iter *iter, unsigned flags)
{ {
if (flags & RADIX_TREE_ITER_TAGGED) { if (flags & RADIX_TREE_ITER_TAGGED) {
iter->tags >>= 1; iter->tags >>= 1;
...@@ -514,7 +559,7 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) ...@@ -514,7 +559,7 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
return NULL; return NULL;
found: found:
if (unlikely(radix_tree_is_internal_node(*slot))) if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot))))
return __radix_tree_next_slot(slot, iter, flags); return __radix_tree_next_slot(slot, iter, flags);
return slot; return slot;
} }
......
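
The two new helpers documented above, radix_tree_iter_lookup() and radix_tree_iter_find(), just seed an iterator and take a single chunk step; the reimplemented idr_get_next() in lib/idr.c further down is built directly on the second one. A small sketch, with my_tree as a hypothetical, already-populated tree:

	struct radix_tree_iter iter;
	void __rcu **slot;

	/* find the first present entry with index >= 10, if any */
	slot = radix_tree_iter_find(&my_tree, &iter, 10);
	if (slot)
		pr_info("index %lu -> %p\n", iter.index,
			rcu_dereference_raw(*slot));
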
@@ -554,7 +554,7 @@ asmlinkage __visible void __init start_kernel(void)
 	if (WARN(!irqs_disabled(),
 		 "Interrupts were enabled *very* early, fixing it\n"))
 		local_irq_disable();
-	idr_init_cache();
+	radix_tree_init();
 
 	/*
 	 * Allow workqueue creation and work item queueing/cancelling
@@ -569,7 +569,6 @@ asmlinkage __visible void __init start_kernel(void)
 	trace_init();
 
 	context_tracking_init();
-	radix_tree_init();
 	/* init some links before init_ISA_irqs() */
 	early_irq_init();
 	init_IRQ();
......
@@ -25,6 +25,9 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 earlycpio.o seq_buf.o siphash.o \
 	 nmi_backtrace.o nodemask.o win_minmax.o
 
+CFLAGS_radix-tree.o += -DCONFIG_SPARSE_RCU_POINTER
+CFLAGS_idr.o += -DCONFIG_SPARSE_RCU_POINTER
+
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
......
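
The two CFLAGS lines added above turn on sparse's __rcu address-space checking for just these two objects, matching the __rcu annotations added throughout the headers in this merge, without enabling CONFIG_SPARSE_RCU_POINTER tree-wide. The checks fire when the files are built through sparse, for example:

	make C=2 lib/radix-tree.o lib/idr.o
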
/* #include <linux/bitmap.h>
* 2002-10-18 written by Jim Houston jim.houston@ccur.com
* Copyright (C) 2002 by Concurrent Computer Corporation
* Distributed under the GNU GPL license version 2.
*
* Modified by George Anzinger to reuse immediately and to use
* find bit instructions. Also removed _irq on spinlocks.
*
* Modified by Nadia Derbey to make it RCU safe.
*
* Small id to pointer translation service.
*
* It uses a radix tree like structure as a sparse array indexed
* by the id to obtain the pointer. The bitmap makes allocating
* a new id quick.
*
* You call it to allocate an id (an int) an associate with that id a
* pointer or what ever, we treat it as a (void *). You can pass this
* id to a user for him to pass back at a later time. You then pass
* that id to this code and it returns your pointer.
*/
#ifndef TEST // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h> #include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h> #include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/percpu.h>
#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
/* Number of id_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
static struct kmem_cache *idr_layer_cache;
static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock); static DEFINE_SPINLOCK(simple_ida_lock);
/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
return (1 << bits) - 1;
}
/*
* Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is
* all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and
* so on.
*/
static int idr_layer_prefix_mask(int layer)
{
return ~idr_max(layer + 1);
}
static struct idr_layer *get_from_free_list(struct idr *idp)
{
struct idr_layer *p;
unsigned long flags;
spin_lock_irqsave(&idp->lock, flags);
if ((p = idp->id_free)) {
idp->id_free = p->ary[0];
idp->id_free_cnt--;
p->ary[0] = NULL;
}
spin_unlock_irqrestore(&idp->lock, flags);
return(p);
}
/**
* idr_layer_alloc - allocate a new idr_layer
* @gfp_mask: allocation mask
* @layer_idr: optional idr to allocate from
*
* If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
* one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch
* an idr_layer from @idr->id_free.
*
* @layer_idr is to maintain backward compatibility with the old alloc
* interface - idr_pre_get() and idr_get_new*() - and will be removed
* together with per-pool preload buffer.
*/
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
struct idr_layer *new;
/* this is the old path, bypass to get_from_free_list() */
if (layer_idr)
return get_from_free_list(layer_idr);
/*
* Try to allocate directly from kmem_cache. We want to try this
* before preload buffer; otherwise, non-preloading idr_alloc()
* users will end up taking advantage of preloading ones. As the
* following is allowed to fail for preloaded cases, suppress
* warning this time.
*/
new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
if (new)
return new;
/*
* Try to fetch one from the per-cpu preload buffer if in process
* context. See idr_preload() for details.
*/
if (!in_interrupt()) {
preempt_disable();
new = __this_cpu_read(idr_preload_head);
if (new) {
__this_cpu_write(idr_preload_head, new->ary[0]);
__this_cpu_dec(idr_preload_cnt);
new->ary[0] = NULL;
}
preempt_enable();
if (new)
return new;
}
/*
* Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
* that memory allocation failure warning is printed as intended.
*/
return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
}
static void idr_layer_rcu_free(struct rcu_head *head)
{
struct idr_layer *layer;
layer = container_of(head, struct idr_layer, rcu_head);
kmem_cache_free(idr_layer_cache, layer);
}
static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
if (idr->hint == p)
RCU_INIT_POINTER(idr->hint, NULL);
call_rcu(&p->rcu_head, idr_layer_rcu_free);
}
/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
p->ary[0] = idp->id_free;
idp->id_free = p;
idp->id_free_cnt++;
}
static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
unsigned long flags;
/*
* Depends on the return element being zeroed.
*/
spin_lock_irqsave(&idp->lock, flags);
__move_to_free_list(idp, p);
spin_unlock_irqrestore(&idp->lock, flags);
}
static void idr_mark_full(struct idr_layer **pa, int id)
{
struct idr_layer *p = pa[0];
int l = 0;
__set_bit(id & IDR_MASK, p->bitmap);
/*
* If this layer is full mark the bit in the layer above to
* show that this part of the radix tree is full. This may
* complete the layer above and require walking up the radix
* tree.
*/
while (bitmap_full(p->bitmap, IDR_SIZE)) {
if (!(p = pa[++l]))
break;
id = id >> IDR_BITS;
__set_bit((id & IDR_MASK), p->bitmap);
}
}
static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
while (idp->id_free_cnt < MAX_IDR_FREE) {
struct idr_layer *new;
new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
if (new == NULL)
return (0);
move_to_free_list(idp, new);
}
return 1;
}
/**
* sub_alloc - try to allocate an id without growing the tree depth
* @idp: idr handle
* @starting_id: id to start search at
* @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
* @gfp_mask: allocation mask for idr_layer_alloc()
* @layer_idr: optional idr passed to idr_layer_alloc()
*
* Allocate an id in range [@starting_id, INT_MAX] from @idp without
* growing its depth. Returns
*
* the allocated id >= 0 if successful,
* -EAGAIN if the tree needs to grow for allocation to succeed,
* -ENOSPC if the id space is exhausted,
* -ENOMEM if more idr_layers need to be allocated.
*/
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
gfp_t gfp_mask, struct idr *layer_idr)
{
int n, m, sh;
struct idr_layer *p, *new;
int l, id, oid;
id = *starting_id;
restart:
p = idp->top;
l = idp->layers;
pa[l--] = NULL;
while (1) {
/*
* We run around this while until we reach the leaf node...
*/
n = (id >> (IDR_BITS*l)) & IDR_MASK;
m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
if (m == IDR_SIZE) {
/* no space available go back to previous layer. */
l++;
oid = id;
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
/* if already at the top layer, we need to grow */
if (id > idr_max(idp->layers)) {
*starting_id = id;
return -EAGAIN;
}
p = pa[l];
BUG_ON(!p);
/* If we need to go up one layer, continue the
* loop; otherwise, restart from the top.
*/
sh = IDR_BITS * (l + 1);
if (oid >> sh == id >> sh)
continue;
else
goto restart;
}
if (m != n) {
sh = IDR_BITS*l;
id = ((id >> sh) ^ n ^ m) << sh;
}
if ((id >= MAX_IDR_BIT) || (id < 0))
return -ENOSPC;
if (l == 0)
break;
/*
* Create the layer below if it is missing.
*/
if (!p->ary[m]) {
new = idr_layer_alloc(gfp_mask, layer_idr);
if (!new)
return -ENOMEM;
new->layer = l-1;
new->prefix = id & idr_layer_prefix_mask(new->layer);
rcu_assign_pointer(p->ary[m], new);
p->count++;
}
pa[l--] = p;
p = p->ary[m];
}
pa[l] = p;
return id;
}
static int idr_get_empty_slot(struct idr *idp, int starting_id,
struct idr_layer **pa, gfp_t gfp_mask,
struct idr *layer_idr)
{
struct idr_layer *p, *new;
int layers, v, id;
unsigned long flags;
id = starting_id;
build_up:
p = idp->top;
layers = idp->layers;
if (unlikely(!p)) {
if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
return -ENOMEM;
p->layer = 0;
layers = 1;
}
/*
* Add a new layer to the top of the tree if the requested
* id is larger than the currently allocated space.
*/
while (id > idr_max(layers)) {
layers++;
if (!p->count) {
/* special case: if the tree is currently empty,
* then we grow the tree by moving the top node
* upwards.
*/
p->layer++;
WARN_ON_ONCE(p->prefix);
continue;
}
if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
/*
* The allocation failed. If we built part of
* the structure tear it down.
*/
spin_lock_irqsave(&idp->lock, flags);
for (new = p; p && p != idp->top; new = p) {
p = p->ary[0];
new->ary[0] = NULL;
new->count = 0;
bitmap_clear(new->bitmap, 0, IDR_SIZE);
__move_to_free_list(idp, new);
}
spin_unlock_irqrestore(&idp->lock, flags);
return -ENOMEM;
}
new->ary[0] = p;
new->count = 1;
new->layer = layers-1;
new->prefix = id & idr_layer_prefix_mask(new->layer);
if (bitmap_full(p->bitmap, IDR_SIZE))
__set_bit(0, new->bitmap);
p = new;
}
rcu_assign_pointer(idp->top, p);
idp->layers = layers;
v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
if (v == -EAGAIN)
goto build_up;
return(v);
}
/*
* @id and @pa are from a successful allocation from idr_get_empty_slot().
* Install the user pointer @ptr and mark the slot full.
*/
static void idr_fill_slot(struct idr *idr, void *ptr, int id,
struct idr_layer **pa)
{
/* update hint used for lookup, cleared from free_layer() */
rcu_assign_pointer(idr->hint, pa[0]);
rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
pa[0]->count++;
idr_mark_full(pa, id);
}
/** /**
* idr_preload - preload for idr_alloc() * idr_alloc - allocate an id
* @gfp_mask: allocation mask to use for preloading * @idr: idr handle
*
* Preload per-cpu layer buffer for idr_alloc(). Can only be used from
* process context and each idr_preload() invocation should be matched with
* idr_preload_end(). Note that preemption is disabled while preloaded.
*
* The first idr_alloc() in the preloaded section can be treated as if it
* were invoked with @gfp_mask used for preloading. This allows using more
* permissive allocation masks for idrs protected by spinlocks.
*
* For example, if idr_alloc() below fails, the failure can be treated as
* if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
*
* idr_preload(GFP_KERNEL);
* spin_lock(lock);
*
* id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
*
* spin_unlock(lock);
* idr_preload_end();
* if (id < 0)
* error;
*/
void idr_preload(gfp_t gfp_mask)
{
/*
* Consuming preload buffer from non-process context breaks preload
* allocation guarantee. Disallow usage from those contexts.
*/
WARN_ON_ONCE(in_interrupt());
might_sleep_if(gfpflags_allow_blocking(gfp_mask));
preempt_disable();
/*
* idr_alloc() is likely to succeed w/o full idr_layer buffer and
* return value from idr_alloc() needs to be checked for failure
* anyway. Silently give up if allocation fails. The caller can
* treat failures from idr_alloc() as if idr_alloc() were called
* with @gfp_mask which should be enough.
*/
while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
struct idr_layer *new;
preempt_enable();
new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
preempt_disable();
if (!new)
break;
/* link the new one to per-cpu preload list */
new->ary[0] = __this_cpu_read(idr_preload_head);
__this_cpu_write(idr_preload_head, new);
__this_cpu_inc(idr_preload_cnt);
}
}
EXPORT_SYMBOL(idr_preload);
/**
* idr_alloc - allocate new idr entry
* @idr: the (initialized) idr
* @ptr: pointer to be associated with the new id * @ptr: pointer to be associated with the new id
* @start: the minimum id (inclusive) * @start: the minimum id (inclusive)
* @end: the maximum id (exclusive, <= 0 for max) * @end: the maximum id (exclusive)
* @gfp_mask: memory allocation flags * @gfp: memory allocation flags
* *
* Allocate an id in [start, end) and associate it with @ptr. If no ID is * Allocates an unused ID in the range [start, end). Returns -ENOSPC
* available in the specified range, returns -ENOSPC. On memory allocation * if there are no unused IDs in that range.
* failure, returns -ENOMEM.
* *
* Note that @end is treated as max when <= 0. This is to always allow * Note that @end is treated as max when <= 0. This is to always allow
* using @start + N as @end as long as N is inside integer range. * using @start + N as @end as long as N is inside integer range.
* *
* The user is responsible for exclusively synchronizing all operations * Simultaneous modifications to the @idr are not allowed and should be
* which may modify @idr. However, read-only accesses such as idr_find() * prevented by the user, usually with a lock. idr_alloc() may be called
* or iteration can be performed under RCU read lock provided the user * concurrently with read-only accesses to the @idr, such as idr_find() and
* destroys @ptr in RCU-safe way after removal from idr. * idr_for_each_entry().
*/ */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{ {
int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */ void __rcu **slot;
struct idr_layer *pa[MAX_IDR_LEVEL + 1]; struct radix_tree_iter iter;
int id;
might_sleep_if(gfpflags_allow_blocking(gfp_mask));
/* sanity checks */
if (WARN_ON_ONCE(start < 0)) if (WARN_ON_ONCE(start < 0))
return -EINVAL; return -EINVAL;
if (unlikely(max < start)) if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
return -ENOSPC; return -EINVAL;
/* allocate id */ radix_tree_iter_init(&iter, start);
id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL); slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
if (unlikely(id < 0)) if (IS_ERR(slot))
return id; return PTR_ERR(slot);
if (unlikely(id > max))
return -ENOSPC;
idr_fill_slot(idr, ptr, id, pa); radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
return id; radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
return iter.index;
} }
EXPORT_SYMBOL_GPL(idr_alloc); EXPORT_SYMBOL_GPL(idr_alloc);
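
The new idr_alloc() above is the core of the rewrite: idr_get_free() descends the tree following the IDR_FREE tag (tag 0, repurposed per the comment in idr.h to mean "free space below this node"), and the two radix_tree_iter_* calls then install the pointer and clear the tag on the chosen slot. A hedged sketch of the resulting state, written as assertions rather than quoted from the source:

	/* after a successful id = idr_alloc(idr, ptr, start, end, gfp): */
	BUG_ON(radix_tree_lookup(&idr->idr_rt, id) != ptr);	/* slot is filled */
	BUG_ON(radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE));	/* no longer advertised as free */
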
/** /**
* idr_alloc_cyclic - allocate new idr entry in a cyclical fashion * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
* @idr: the (initialized) idr * @idr: idr handle
* @ptr: pointer to be associated with the new id * @ptr: pointer to be associated with the new id
* @start: the minimum id (inclusive) * @start: the minimum id (inclusive)
* @end: the maximum id (exclusive, <= 0 for max) * @end: the maximum id (exclusive)
* @gfp_mask: memory allocation flags * @gfp: memory allocation flags
*
* Essentially the same as idr_alloc, but prefers to allocate progressively
* higher ids if it can. If the "cur" counter wraps, then it will start again
* at the "start" end of the range and allocate one that has already been used.
*/
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
gfp_t gfp_mask)
{
int id;
id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
if (id == -ENOSPC)
id = idr_alloc(idr, ptr, start, end, gfp_mask);
if (likely(id >= 0))
idr->cur = id + 1;
return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
static void idr_remove_warning(int id)
{
WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
}
static void sub_remove(struct idr *idp, int shift, int id)
{
struct idr_layer *p = idp->top;
struct idr_layer **pa[MAX_IDR_LEVEL + 1];
struct idr_layer ***paa = &pa[0];
struct idr_layer *to_free;
int n;
*paa = NULL;
*++paa = &idp->top;
while ((shift > 0) && p) {
n = (id >> shift) & IDR_MASK;
__clear_bit(n, p->bitmap);
*++paa = &p->ary[n];
p = p->ary[n];
shift -= IDR_BITS;
}
n = id & IDR_MASK;
if (likely(p != NULL && test_bit(n, p->bitmap))) {
__clear_bit(n, p->bitmap);
RCU_INIT_POINTER(p->ary[n], NULL);
to_free = NULL;
while(*paa && ! --((**paa)->count)){
if (to_free)
free_layer(idp, to_free);
to_free = **paa;
**paa-- = NULL;
}
if (!*paa)
idp->layers = 0;
if (to_free)
free_layer(idp, to_free);
} else
idr_remove_warning(id);
}
/**
* idr_remove - remove the given id and free its slot
* @idp: idr handle
* @id: unique key
*/
void idr_remove(struct idr *idp, int id)
{
struct idr_layer *p;
struct idr_layer *to_free;
if (id < 0)
return;
if (id > idr_max(idp->layers)) {
idr_remove_warning(id);
return;
}
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
idp->top->ary[0]) {
/*
* Single child at leftmost slot: we can shrink the tree.
* This level is not needed anymore since when layers are
* inserted, they are inserted at the top of the existing
* tree.
*/
to_free = idp->top;
p = idp->top->ary[0];
rcu_assign_pointer(idp->top, p);
--idp->layers;
to_free->count = 0;
bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
free_layer(idp, to_free);
}
}
EXPORT_SYMBOL(idr_remove);
static void __idr_remove_all(struct idr *idp)
{
int n, id, max;
int bt_mask;
struct idr_layer *p;
struct idr_layer *pa[MAX_IDR_LEVEL + 1];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
*paa = idp->top;
RCU_INIT_POINTER(idp->top, NULL);
max = idr_max(idp->layers);
id = 0;
while (id >= 0 && id <= max) {
p = *paa;
while (n > IDR_BITS && p) {
n -= IDR_BITS;
p = p->ary[(id >> n) & IDR_MASK];
*++paa = p;
}
bt_mask = id;
id += 1 << n;
/* Get the highest bit that the above add changed from 0->1. */
while (n < fls(id ^ bt_mask)) {
if (*paa)
free_layer(idp, *paa);
n += IDR_BITS;
--paa;
}
}
idp->layers = 0;
}
/**
* idr_destroy - release all cached layers within an idr tree
* @idp: idr handle
* *
* Free all id mappings and all idp_layers. After this function, @idp is * Allocates an ID larger than the last ID allocated if one is available.
* completely unused and can be freed / recycled. The caller is * If not, it will attempt to allocate the smallest ID that is larger or
* responsible for ensuring that no one else accesses @idp during or after * equal to @start.
* idr_destroy().
*
* A typical clean-up sequence for objects stored in an idr tree will use
* idr_for_each() to free all objects, if necessary, then idr_destroy() to
* free up the id mappings and cached idr_layers.
*/ */
void idr_destroy(struct idr *idp) int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{ {
__idr_remove_all(idp); int id, curr = idr->idr_next;
while (idp->id_free_cnt) { if (curr < start)
struct idr_layer *p = get_from_free_list(idp); curr = start;
kmem_cache_free(idr_layer_cache, p);
}
}
EXPORT_SYMBOL(idr_destroy);
void *idr_find_slowpath(struct idr *idp, int id) id = idr_alloc(idr, ptr, curr, end, gfp);
{ if ((id == -ENOSPC) && (curr > start))
int n; id = idr_alloc(idr, ptr, start, curr, gfp);
struct idr_layer *p;
if (id < 0) if (id >= 0)
return NULL; idr->idr_next = id + 1U;
p = rcu_dereference_raw(idp->top);
if (!p)
return NULL;
n = (p->layer+1) * IDR_BITS;
if (id > idr_max(p->layer + 1))
return NULL;
BUG_ON(n == 0);
while (n > 0 && p) { return id;
n -= IDR_BITS;
BUG_ON(n != p->layer*IDR_BITS);
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
}
return((void *)p);
} }
EXPORT_SYMBOL(idr_find_slowpath); EXPORT_SYMBOL(idr_alloc_cyclic);
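
The cyclic variant above keeps its cursor in idr->idr_next and only falls back to the [start, curr) range once the upper range is exhausted, so recently freed IDs are not reused immediately. An illustrative sequence (returned values are examples, not guarantees):

	a = idr_alloc_cyclic(&my_idr, p1, 0, 0, GFP_KERNEL);	/* e.g. 0 */
	b = idr_alloc_cyclic(&my_idr, p2, 0, 0, GFP_KERNEL);	/* e.g. 1 */
	idr_remove(&my_idr, a);
	c = idr_alloc_cyclic(&my_idr, p3, 0, 0, GFP_KERNEL);	/* e.g. 2, not 0 */
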
/** /**
* idr_for_each - iterate through all stored pointers * idr_for_each - iterate through all stored pointers
* @idp: idr handle * @idr: idr handle
* @fn: function to be called for each pointer * @fn: function to be called for each pointer
* @data: data passed back to callback function * @data: data passed to callback function
* *
* Iterate over the pointers registered with the given idr. The * The callback function will be called for each entry in @idr, passing
* callback function will be called for each pointer currently * the id, the pointer and the data pointer passed to this function.
* registered, passing the id, the pointer and the data pointer passed
* to this function. It is not safe to modify the idr tree while in
* the callback, so functions such as idr_get_new and idr_remove are
* not allowed.
* *
* We check the return of @fn each time. If it returns anything other * If @fn returns anything other than %0, the iteration stops and that
* than %0, we break out and return that value. * value is returned from this function.
* *
* The caller must serialize idr_for_each() vs idr_get_new() and idr_remove(). * idr_for_each() can be called concurrently with idr_alloc() and
* idr_remove() if protected by RCU. Newly added entries may not be
* seen and deleted entries may be seen, but adding and removing entries
* will not cause other entries to be skipped, nor spurious ones to be seen.
*/ */
int idr_for_each(struct idr *idp, int idr_for_each(const struct idr *idr,
int (*fn)(int id, void *p, void *data), void *data) int (*fn)(int id, void *p, void *data), void *data)
{ {
int n, id, max, error = 0; struct radix_tree_iter iter;
struct idr_layer *p; void __rcu **slot;
struct idr_layer *pa[MAX_IDR_LEVEL + 1];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
*paa = rcu_dereference_raw(idp->top);
max = idr_max(idp->layers);
id = 0;
while (id >= 0 && id <= max) {
p = *paa;
while (n > 0 && p) {
n -= IDR_BITS;
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
*++paa = p;
}
if (p) {
error = fn(id, (void *)p, data);
if (error)
break;
}
id += 1 << n; radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
while (n < fls(id)) { int ret = fn(iter.index, rcu_dereference_raw(*slot), data);
n += IDR_BITS; if (ret)
--paa; return ret;
}
} }
return error; return 0;
} }
EXPORT_SYMBOL(idr_for_each); EXPORT_SYMBOL(idr_for_each);
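A sketch of the callback contract described above (illustrative; the IDR is assumed to be protected as documented):

	/* Count the stored entries; a non-zero return from the callback
	 * would stop the walk and be returned by idr_for_each(). */
	static int count_one(int id, void *p, void *data)
	{
		(*(int *)data)++;
		return 0;
	}

	static int count_entries(const struct idr *idr)
	{
		int count = 0;

		idr_for_each(idr, count_one, &count);
		return count;
	}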
/** /**
* idr_get_next - look up the next object from the given id. * idr_get_next - Find next populated entry
* @idp: idr handle * @idr: idr handle
* @nextidp: pointer to lookup key * @nextid: Pointer to lowest possible ID to return
*
* Returns a pointer to the registered object with the next id greater than
* or equal to the given id. After lookup, *@nextidp will be updated for the next
* iteration.
* *
* This function can be called under rcu_read_lock(), given that the leaf * Returns the next populated entry in the tree with an ID greater than
* pointers lifetimes are correctly managed. * or equal to the value pointed to by @nextid. On exit, @nextid is updated
* to the ID of the found value. To use in a loop, the value pointed to by
* nextid must be incremented by the user.
*/ */
void *idr_get_next(struct idr *idp, int *nextidp) void *idr_get_next(struct idr *idr, int *nextid)
{ {
struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1]; struct radix_tree_iter iter;
struct idr_layer **paa = &pa[0]; void __rcu **slot;
int id = *nextidp;
int n, max;
/* find first ent */ slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
p = *paa = rcu_dereference_raw(idp->top); if (!slot)
if (!p)
return NULL; return NULL;
n = (p->layer + 1) * IDR_BITS;
max = idr_max(p->layer + 1);
while (id >= 0 && id <= max) {
p = *paa;
while (n > 0 && p) {
n -= IDR_BITS;
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
*++paa = p;
}
if (p) {
*nextidp = id;
return p;
}
/* *nextid = iter.index;
* Proceed to the next layer at the current level. Unlike return rcu_dereference_raw(*slot);
* idr_for_each(), @id isn't guaranteed to be aligned to
* layer boundary at this point and adding 1 << n may
* incorrectly skip IDs. Make sure we jump to the
* beginning of the next layer using round_up().
*/
id = round_up(id + 1, 1 << n);
while (n < fls(id)) {
n += IDR_BITS;
--paa;
}
}
return NULL;
} }
EXPORT_SYMBOL(idr_get_next); EXPORT_SYMBOL(idr_get_next);
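The increment requirement above leads to the usual loop shape, sketched here (this is essentially what the idr_for_each_entry() macro expands to):

	static void visit_all(struct idr *idr)
	{
		void *entry;
		int id;

		/* idr_get_next() never advances the cursor itself, so the
		 * caller bumps 'id' after each entry it handles. */
		for (id = 0; (entry = idr_get_next(idr, &id)) != NULL; id++)
			pr_debug("id %d -> %p\n", id, entry);
	}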
/** /**
* idr_replace - replace pointer for given id * idr_replace - replace pointer for given id
* @idp: idr handle * @idr: idr handle
* @ptr: pointer you want associated with the id * @ptr: New pointer to associate with the ID
* @id: lookup key * @id: Lookup key
* *
* Replace the pointer registered with an id and return the old value. * Replace the pointer registered with an ID and return the old value.
* A %-ENOENT return indicates that @id was not found. * This function can be called under the RCU read lock concurrently with
* A %-EINVAL return indicates that @id was not within valid constraints. * idr_alloc() and idr_remove() (as long as the ID being removed is not
* the one being replaced!).
* *
* The caller must serialize with writers. * Returns: the old value on success. %-ENOENT indicates that @id was not found.
* %-EINVAL indicates that @id or @ptr were not valid.
*/ */
void *idr_replace(struct idr *idp, void *ptr, int id) void *idr_replace(struct idr *idr, void *ptr, int id)
{ {
int n; struct radix_tree_node *node;
struct idr_layer *p, *old_p; void __rcu **slot = NULL;
void *entry;
if (id < 0) if (WARN_ON_ONCE(id < 0))
return ERR_PTR(-EINVAL);
if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
p = idp->top; entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
if (!p) if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
return ERR_PTR(-ENOENT);
if (id > idr_max(p->layer + 1))
return ERR_PTR(-ENOENT);
n = p->layer * IDR_BITS;
while ((n > 0) && p) {
p = p->ary[(id >> n) & IDR_MASK];
n -= IDR_BITS;
}
n = id & IDR_MASK;
if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
return ERR_PTR(-ENOENT); return ERR_PTR(-ENOENT);
old_p = p->ary[n]; __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL, NULL);
rcu_assign_pointer(p->ary[n], ptr);
return old_p; return entry;
} }
EXPORT_SYMBOL(idr_replace); EXPORT_SYMBOL(idr_replace);
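A short error-handling sketch for the return convention above (the ownership model is hypothetical):

	static int swap_entry(struct idr *idr, int id, void *new)
	{
		void *old = idr_replace(idr, new, id);

		if (IS_ERR(old))
			return PTR_ERR(old);	/* -ENOENT or -EINVAL */
		kfree(old);			/* dispose of the old value */
		return 0;
	}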
void __init idr_init_cache(void)
{
idr_layer_cache = kmem_cache_create("idr_layer_cache",
sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}
/**
* idr_init - initialize idr handle
* @idp: idr handle
*
* This function is used to set up the handle (@idp) that you will pass
* to the rest of the functions.
*/
void idr_init(struct idr *idp)
{
memset(idp, 0, sizeof(struct idr));
spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
static int idr_has_entry(int id, void *p, void *data)
{
return 1;
}
bool idr_is_empty(struct idr *idp)
{
return !idr_for_each(idp, idr_has_entry, NULL);
}
EXPORT_SYMBOL(idr_is_empty);
/** /**
* DOC: IDA description * DOC: IDA description
* IDA - IDR based ID allocator
* *
* This is an id allocator without id -> pointer translation. Memory * The IDA is an ID allocator which does not provide the ability to
* usage is much lower than full blown idr because each id only * associate an ID with a pointer. As such, it only needs to store one
* occupies a bit. ida uses a custom leaf node which contains * bit per ID, and so is more space efficient than an IDR. To use an IDA,
* IDA_BITMAP_BITS slots. * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
* then initialise it using ida_init()). To allocate a new ID, call
* ida_simple_get(). To free an ID, call ida_simple_remove().
*
* If you have more complex locking requirements, use a loop around
* ida_pre_get() and ida_get_new() to allocate a new ID. Then use
* ida_remove() to free an ID. You must make sure that ida_get_new() and
* ida_remove() cannot be called at the same time as each other for the
* same IDA.
* *
* 2007-04-25 written by Tejun Heo <htejun@gmail.com> * You can also use ida_get_new_above() if you need an ID to be allocated
* above a particular number. ida_destroy() can be used to dispose of an
* IDA without needing to free the individual IDs in it. You can use
* ida_is_empty() to find out whether the IDA has any IDs currently allocated.
*
* IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
* limitation, it should be quite straightforward to raise the maximum.
*/ */
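A minimal sketch of the simple interface recommended above (the IDA and the [0, 256) range are hypothetical):

	static DEFINE_IDA(my_ida);

	/* ida_simple_get() handles locking and memory allocation internally. */
	static int get_minor(void)
	{
		return ida_simple_get(&my_ida, 0, 256, GFP_KERNEL);
	}

	static void put_minor(int minor)
	{
		ida_simple_remove(&my_ida, minor);
	}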
static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap) /*
{ * Developer's notes:
unsigned long flags; *
* The IDA uses the functionality provided by the IDR & radix tree to store
if (!ida->free_bitmap) { * bitmaps in each entry. The IDR_FREE tag means there is at least one bit
spin_lock_irqsave(&ida->idr.lock, flags); * free, unlike the IDR where it means at least one entry is free.
if (!ida->free_bitmap) { *
ida->free_bitmap = bitmap; * I considered telling the radix tree that each slot is an order-10 node
bitmap = NULL; * and storing the bit numbers in the radix tree, but the radix tree can't
} * allow a single multiorder entry at index 0, which would significantly
spin_unlock_irqrestore(&ida->idr.lock, flags); * increase memory consumption for the IDA. So instead we divide the index
} * by the number of bits in the leaf bitmap before doing a radix tree lookup.
kfree(bitmap);
}
/**
* ida_pre_get - reserve resources for ida allocation
* @ida: ida handle
* @gfp_mask: memory allocation flag
* *
* This function should be called prior to locking and calling the * As an optimisation, if there are only a few low bits set in any given
* following function. It preallocates enough memory to satisfy the * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional
* worst possible allocation. * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits
* directly in the entry. By being really tricksy, we could store
* BITS_PER_LONG - 1 bits, but there are diminishing returns after optimising
* for 0-3 allocated IDs.
* *
* If the system is REALLY out of memory this function returns %0, * We allow the radix tree 'exceptional' count to get out of date. Nothing
* otherwise %1. * in the IDA nor the radix tree code checks it. If it becomes important
* to maintain an accurate exceptional count, switch the rcu_assign_pointer()
* calls to radix_tree_iter_replace() which will correct the exceptional
* count.
*
* The IDA always requires a lock to alloc/free. If we add a 'test_bit'
* equivalent, it will still need locking. Going to RCU lookup would require
* using RCU to free bitmaps, and that's not trivial without embedding an
* RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
* bitmap, which is excessive.
*/ */
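To make the exceptional-entry packing above concrete, a sketch of the bit arithmetic (an assumption-labelled illustration: RADIX_TREE_EXCEPTIONAL_SHIFT is 2, so a 64-bit slot word holds IDs 0-61):

	/* ID n of a leaf is stored as bit (n + RADIX_TREE_EXCEPTIONAL_SHIFT)
	 * of the slot word; bit 1 marks the word as an exceptional entry. */
	static unsigned long mark_small_id(unsigned long word, unsigned int id)
	{
		if (!word)
			word = RADIX_TREE_EXCEPTIONAL_ENTRY;	/* empty leaf */
		return word | (1UL << (id + RADIX_TREE_EXCEPTIONAL_SHIFT));
	}
	/* Once an ID no longer fits, ida_get_new_above() below shifts the
	 * word down and copies the bits into a real 128-byte ida_bitmap. */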
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
/* allocate idr_layers */
if (!__idr_pre_get(&ida->idr, gfp_mask))
return 0;
/* allocate free_bitmap */
if (!ida->free_bitmap) {
struct ida_bitmap *bitmap;
bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask); #define IDA_MAX (0x80000000U / IDA_BITMAP_BITS)
if (!bitmap)
return 0;
free_bitmap(ida, bitmap);
}
return 1;
}
EXPORT_SYMBOL(ida_pre_get);
/** /**
* ida_get_new_above - allocate new ID above or equal to a start id * ida_get_new_above - allocate new ID above or equal to a start id
* @ida: ida handle * @ida: ida handle
* @starting_id: id to start search at * @start: id to start search at
* @p_id: pointer to the allocated handle * @id: pointer to the allocated handle
* *
* Allocate new ID above or equal to @starting_id. It should be called * Allocate new ID above or equal to @start. It should be called
* with any required locks. * with any required locks to ensure that concurrent calls to
* ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
* Consider using ida_simple_get() if you do not have complex locking
* requirements.
* *
* If memory is required, it will return %-EAGAIN, you should unlock * If memory is required, it will return %-EAGAIN, you should unlock
* and go back to the ida_pre_get() call. If the ida is full, it will * and go back to the ida_pre_get() call. If the ida is full, it will
* return %-ENOSPC. * return %-ENOSPC. On success, it will return 0.
*
* Note that callers must ensure that concurrent access to @ida is not possible.
* See ida_simple_get() for a variant which takes care of locking.
* *
* @p_id returns a value in the range @starting_id ... %0x7fffffff. * @id returns a value in the range @start ... %0x7fffffff.
*/ */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) int ida_get_new_above(struct ida *ida, int start, int *id)
{ {
struct idr_layer *pa[MAX_IDR_LEVEL + 1]; struct radix_tree_root *root = &ida->ida_rt;
void __rcu **slot;
struct radix_tree_iter iter;
struct ida_bitmap *bitmap; struct ida_bitmap *bitmap;
unsigned long flags; unsigned long index;
int idr_id = starting_id / IDA_BITMAP_BITS; unsigned bit, ebit;
int offset = starting_id % IDA_BITMAP_BITS; int new;
int t, id;
index = start / IDA_BITMAP_BITS;
restart: bit = start % IDA_BITMAP_BITS;
/* get vacant slot */ ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT;
t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
if (t < 0) slot = radix_tree_iter_init(&iter, index);
return t == -ENOMEM ? -EAGAIN : t; for (;;) {
if (slot)
if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) slot = radix_tree_next_slot(slot, &iter,
return -ENOSPC; RADIX_TREE_ITER_TAGGED);
if (!slot) {
if (t != idr_id) slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
offset = 0; if (IS_ERR(slot)) {
idr_id = t; if (slot == ERR_PTR(-ENOMEM))
/* if bitmap isn't there, create a new one */
bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
if (!bitmap) {
spin_lock_irqsave(&ida->idr.lock, flags);
bitmap = ida->free_bitmap;
ida->free_bitmap = NULL;
spin_unlock_irqrestore(&ida->idr.lock, flags);
if (!bitmap)
return -EAGAIN; return -EAGAIN;
return PTR_ERR(slot);
memset(bitmap, 0, sizeof(struct ida_bitmap)); }
rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK], }
(void *)bitmap); if (iter.index > index) {
pa[0]->count++; bit = 0;
ebit = RADIX_TREE_EXCEPTIONAL_SHIFT;
}
new = iter.index * IDA_BITMAP_BITS;
bitmap = rcu_dereference_raw(*slot);
if (radix_tree_exception(bitmap)) {
unsigned long tmp = (unsigned long)bitmap;
ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit);
if (ebit < BITS_PER_LONG) {
tmp |= 1UL << ebit;
rcu_assign_pointer(*slot, (void *)tmp);
*id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
return 0;
} }
bitmap = this_cpu_xchg(ida_bitmap, NULL);
/* lookup for empty slot */ if (!bitmap)
t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset); return -EAGAIN;
if (t == IDA_BITMAP_BITS) { memset(bitmap, 0, sizeof(*bitmap));
/* no empty slot after offset, continue to the next chunk */ bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
idr_id++; rcu_assign_pointer(*slot, bitmap);
offset = 0;
goto restart;
} }
id = idr_id * IDA_BITMAP_BITS + t; if (bitmap) {
if (id >= MAX_IDR_BIT) bit = find_next_zero_bit(bitmap->bitmap,
IDA_BITMAP_BITS, bit);
new += bit;
if (new < 0)
return -ENOSPC; return -ENOSPC;
if (bit == IDA_BITMAP_BITS)
continue;
__set_bit(t, bitmap->bitmap); __set_bit(bit, bitmap->bitmap);
if (++bitmap->nr_busy == IDA_BITMAP_BITS) if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
idr_mark_full(pa, idr_id); radix_tree_iter_tag_clear(root, &iter,
IDR_FREE);
*p_id = id; } else {
new += bit;
/* Each leaf node can handle nearly a thousand slots and the if (new < 0)
* whole idea of ida is to have a small memory footprint. return -ENOSPC;
* Throw away extra resources one by one after each successful if (ebit < BITS_PER_LONG) {
* allocation. bitmap = (void *)((1UL << ebit) |
*/ RADIX_TREE_EXCEPTIONAL_ENTRY);
if (ida->idr.id_free_cnt || ida->free_bitmap) { radix_tree_iter_replace(root, &iter, slot,
struct idr_layer *p = get_from_free_list(&ida->idr); bitmap);
if (p) *id = new;
kmem_cache_free(idr_layer_cache, p); return 0;
}
bitmap = this_cpu_xchg(ida_bitmap, NULL);
if (!bitmap)
return -EAGAIN;
memset(bitmap, 0, sizeof(*bitmap));
__set_bit(bit, bitmap->bitmap);
radix_tree_iter_replace(root, &iter, slot, bitmap);
} }
*id = new;
return 0; return 0;
}
} }
EXPORT_SYMBOL(ida_get_new_above); EXPORT_SYMBOL(ida_get_new_above);
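The retry protocol in the comment above is conventionally wrapped in a loop like this sketch (the spinlock is hypothetical; this mirrors what ida_simple_get() does internally):

	static DEFINE_SPINLOCK(my_ida_lock);

	static int alloc_above(struct ida *ida, int start, int *id)
	{
		int ret;

		do {
			if (!ida_pre_get(ida, GFP_KERNEL))
				return -ENOMEM;
			spin_lock(&my_ida_lock);
			ret = ida_get_new_above(ida, start, id);
			spin_unlock(&my_ida_lock);
		} while (ret == -EAGAIN);

		return ret;	/* 0 on success, -ENOSPC if the range is full */
	}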
/** /**
* ida_remove - remove the given ID * ida_remove - Free the given ID
* @ida: ida handle * @ida: ida handle
* @id: ID to free * @id: ID to free
*
* This function should not be called at the same time as ida_get_new_above().
*/ */
void ida_remove(struct ida *ida, int id) void ida_remove(struct ida *ida, int id)
{ {
struct idr_layer *p = ida->idr.top; unsigned long index = id / IDA_BITMAP_BITS;
int shift = (ida->idr.layers - 1) * IDR_BITS; unsigned offset = id % IDA_BITMAP_BITS;
int idr_id = id / IDA_BITMAP_BITS;
int offset = id % IDA_BITMAP_BITS;
int n;
struct ida_bitmap *bitmap; struct ida_bitmap *bitmap;
unsigned long *btmp;
struct radix_tree_iter iter;
void __rcu **slot;
if (idr_id > idr_max(ida->idr.layers)) slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
if (!slot)
goto err; goto err;
/* clear full bits while looking up the leaf idr_layer */ bitmap = rcu_dereference_raw(*slot);
while ((shift > 0) && p) { if (radix_tree_exception(bitmap)) {
n = (idr_id >> shift) & IDR_MASK; btmp = (unsigned long *)slot;
__clear_bit(n, p->bitmap); offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
p = p->ary[n]; if (offset >= BITS_PER_LONG)
shift -= IDR_BITS;
}
if (p == NULL)
goto err; goto err;
} else {
n = idr_id & IDR_MASK; btmp = bitmap->bitmap;
__clear_bit(n, p->bitmap); }
if (!test_bit(offset, btmp))
bitmap = (void *)p->ary[n];
if (!bitmap || !test_bit(offset, bitmap->bitmap))
goto err; goto err;
/* update bitmap and remove it if empty */ __clear_bit(offset, btmp);
__clear_bit(offset, bitmap->bitmap); radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
if (--bitmap->nr_busy == 0) { if (radix_tree_exception(bitmap)) {
__set_bit(n, p->bitmap); /* to please idr_remove() */ if (rcu_dereference_raw(*slot) ==
idr_remove(&ida->idr, idr_id); (void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
free_bitmap(ida, bitmap); radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
} else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
kfree(bitmap);
radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
} }
return; return;
err: err:
WARN(1, "ida_remove called for id=%d which is not allocated.\n", id); WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
} }
EXPORT_SYMBOL(ida_remove); EXPORT_SYMBOL(ida_remove);
/** /**
* ida_destroy - release all cached layers within an ida tree * ida_destroy - Free the contents of an ida
* @ida: ida handle * @ida: ida handle
*
* Calling this function releases all resources associated with an IDA. When
* this call returns, the IDA is empty and can be reused or freed. The caller
* should not allow ida_remove() or ida_get_new_above() to be called at the
* same time.
*/ */
void ida_destroy(struct ida *ida) void ida_destroy(struct ida *ida)
{ {
idr_destroy(&ida->idr); struct radix_tree_iter iter;
kfree(ida->free_bitmap); void __rcu **slot;
radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
if (!radix_tree_exception(bitmap))
kfree(bitmap);
radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
}
} }
EXPORT_SYMBOL(ida_destroy); EXPORT_SYMBOL(ida_destroy);
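Teardown per the comment above needs no per-ID bookkeeping; a sketch (reusing the hypothetical my_ida from the earlier sketch):

	static void my_module_exit(void)
	{
		/* Frees every bitmap at once; no ida_remove() per ID needed.
		 * The IDA is empty and reusable afterwards. */
		ida_destroy(&my_ida);
	}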
...@@ -1141,18 +482,3 @@ void ida_simple_remove(struct ida *ida, unsigned int id)
spin_unlock_irqrestore(&simple_ida_lock, flags); spin_unlock_irqrestore(&simple_ida_lock, flags);
} }
EXPORT_SYMBOL(ida_simple_remove); EXPORT_SYMBOL(ida_simple_remove);
/**
* ida_init - initialize ida handle
* @ida: ida handle
*
* This function is used to set up the handle (@ida) that you will pass
* to the rest of the functions.
*/
void ida_init(struct ida *ida)
{
memset(ida, 0, sizeof(struct ida));
idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);
...@@ -22,20 +22,21 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/ */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/export.h> #include <linux/kmemleak.h>
#include <linux/radix-tree.h>
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/preempt.h> /* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#include <linux/preempt.h> /* in_interrupt() */
/* Number of nodes in fully populated tree of given height */ /* Number of nodes in fully populated tree of given height */
...@@ -59,12 +60,29 @@ static struct kmem_cache *radix_tree_node_cachep;
*/ */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1) #define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
/*
* The IDR does not have to be as high as the radix tree since it uses
* signed integers, not unsigned longs.
*/
#define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1)
#define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
/*
* The IDA is even shorter since it uses a bitmap at the last level.
*/
#define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
#define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
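A worked instance of these bounds (assuming the common RADIX_TREE_MAP_SHIFT of 6 and 1024-bit, i.e. 128-byte, IDA bitmaps):

	/*
	 * IDR_INDEX_BITS = 31           -> IDR_MAX_PATH = 6,  preload 11
	 * IDA_INDEX_BITS = 31 - 10 = 21 -> IDA_MAX_PATH = 4,  preload  7
	 * versus RADIX_TREE_MAX_PATH = 11 and RADIX_TREE_PRELOAD_SIZE = 21
	 * for the full unsigned long index space.
	 */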
/* /*
* Per-cpu pool of preloaded nodes * Per-cpu pool of preloaded nodes
*/ */
struct radix_tree_preload { struct radix_tree_preload {
unsigned nr; unsigned nr;
/* nodes->private_data points to next preallocated node */ /* nodes->parent points to next preallocated node */
struct radix_tree_node *nodes; struct radix_tree_node *nodes;
}; };
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
...@@ -83,35 +101,38 @@ static inline void *node_to_entry(void *ptr)
#ifdef CONFIG_RADIX_TREE_MULTIORDER #ifdef CONFIG_RADIX_TREE_MULTIORDER
/* Sibling slots point directly to another slot in the same node */ /* Sibling slots point directly to another slot in the same node */
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) static inline
bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
{ {
void **ptr = node; void __rcu **ptr = node;
return (parent->slots <= ptr) && return (parent->slots <= ptr) &&
(ptr < parent->slots + RADIX_TREE_MAP_SIZE); (ptr < parent->slots + RADIX_TREE_MAP_SIZE);
} }
#else #else
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) static inline
bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
{ {
return false; return false;
} }
#endif #endif
static inline unsigned long get_slot_offset(struct radix_tree_node *parent, static inline unsigned long
void **slot) get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{ {
return slot - parent->slots; return slot - parent->slots;
} }
static unsigned int radix_tree_descend(struct radix_tree_node *parent, static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
struct radix_tree_node **nodep, unsigned long index) struct radix_tree_node **nodep, unsigned long index)
{ {
unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK; unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
void **entry = rcu_dereference_raw(parent->slots[offset]); void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
#ifdef CONFIG_RADIX_TREE_MULTIORDER #ifdef CONFIG_RADIX_TREE_MULTIORDER
if (radix_tree_is_internal_node(entry)) { if (radix_tree_is_internal_node(entry)) {
if (is_sibling_entry(parent, entry)) { if (is_sibling_entry(parent, entry)) {
void **sibentry = (void **) entry_to_node(entry); void __rcu **sibentry;
sibentry = (void __rcu **) entry_to_node(entry);
offset = get_slot_offset(parent, sibentry); offset = get_slot_offset(parent, sibentry);
entry = rcu_dereference_raw(*sibentry); entry = rcu_dereference_raw(*sibentry);
} }
...@@ -122,7 +143,7 @@ static unsigned int radix_tree_descend(struct radix_tree_node *parent,
return offset; return offset;
} }
static inline gfp_t root_gfp_mask(struct radix_tree_root *root) static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{ {
return root->gfp_mask & __GFP_BITS_MASK; return root->gfp_mask & __GFP_BITS_MASK;
} }
...@@ -139,42 +160,48 @@ static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
__clear_bit(offset, node->tags[tag]); __clear_bit(offset, node->tags[tag]);
} }
static inline int tag_get(struct radix_tree_node *node, unsigned int tag, static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
int offset) int offset)
{ {
return test_bit(offset, node->tags[tag]); return test_bit(offset, node->tags[tag]);
} }
static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag) static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{ {
root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
} }
static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag) static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{ {
root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
} }
static inline void root_tag_clear_all(struct radix_tree_root *root) static inline void root_tag_clear_all(struct radix_tree_root *root)
{ {
root->gfp_mask &= __GFP_BITS_MASK; root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1;
} }
static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag) static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{ {
return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT));
} }
static inline unsigned root_tags_get(struct radix_tree_root *root) static inline unsigned root_tags_get(const struct radix_tree_root *root)
{ {
return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT; return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT;
}
static inline bool is_idr(const struct radix_tree_root *root)
{
return !!(root->gfp_mask & ROOT_IS_IDR);
} }
/* /*
* Returns 1 if any slot in the node has this tag set. * Returns 1 if any slot in the node has this tag set.
* Otherwise returns 0. * Otherwise returns 0.
*/ */
static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) static inline int any_tag_set(const struct radix_tree_node *node,
unsigned int tag)
{ {
unsigned idx; unsigned idx;
for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
...@@ -184,6 +211,11 @@ static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
return 0; return 0;
} }
static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}
/** /**
* radix_tree_find_next_bit - find the next set bit in a memory region * radix_tree_find_next_bit - find the next set bit in a memory region
* *
...@@ -232,11 +264,18 @@ static inline unsigned long shift_maxindex(unsigned int shift)
return (RADIX_TREE_MAP_SIZE << shift) - 1; return (RADIX_TREE_MAP_SIZE << shift) - 1;
} }
static inline unsigned long node_maxindex(struct radix_tree_node *node) static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{ {
return shift_maxindex(node->shift); return shift_maxindex(node->shift);
} }
static unsigned long next_index(unsigned long index,
const struct radix_tree_node *node,
unsigned long offset)
{
return (index & ~node_maxindex(node)) + (offset << node->shift);
}
#ifndef __KERNEL__ #ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index) static void dump_node(struct radix_tree_node *node, unsigned long index)
{ {
...@@ -275,11 +314,59 @@ static void radix_tree_dump(struct radix_tree_root *root)
{ {
pr_debug("radix root: %p rnode %p tags %x\n", pr_debug("radix root: %p rnode %p tags %x\n",
root, root->rnode, root, root->rnode,
root->gfp_mask >> __GFP_BITS_SHIFT); root->gfp_mask >> ROOT_TAG_SHIFT);
if (!radix_tree_is_internal_node(root->rnode)) if (!radix_tree_is_internal_node(root->rnode))
return; return;
dump_node(entry_to_node(root->rnode), 0); dump_node(entry_to_node(root->rnode), 0);
} }
static void dump_ida_node(void *entry, unsigned long index)
{
unsigned long i;
if (!entry)
return;
if (radix_tree_is_internal_node(entry)) {
struct radix_tree_node *node = entry_to_node(entry);
pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
node, node->offset, index * IDA_BITMAP_BITS,
((index | node_maxindex(node)) + 1) *
IDA_BITMAP_BITS - 1,
node->parent, node->tags[0][0], node->shift,
node->count);
for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
dump_ida_node(node->slots[i],
index | (i << node->shift));
} else if (radix_tree_exceptional_entry(entry)) {
pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
entry, (int)(index & RADIX_TREE_MAP_MASK),
index * IDA_BITMAP_BITS,
index * IDA_BITMAP_BITS + BITS_PER_LONG -
RADIX_TREE_EXCEPTIONAL_SHIFT,
(unsigned long)entry >>
RADIX_TREE_EXCEPTIONAL_SHIFT);
} else {
struct ida_bitmap *bitmap = entry;
pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
(int)(index & RADIX_TREE_MAP_MASK),
index * IDA_BITMAP_BITS,
(index + 1) * IDA_BITMAP_BITS - 1);
for (i = 0; i < IDA_BITMAP_LONGS; i++)
pr_cont(" %lx", bitmap->bitmap[i]);
pr_cont("\n");
}
}
static void ida_dump(struct ida *ida)
{
struct radix_tree_root *root = &ida->ida_rt;
pr_debug("ida: %p node %p free %d\n", ida, root->rnode,
root->gfp_mask >> ROOT_TAG_SHIFT);
dump_ida_node(root->rnode, 0);
}
#endif #endif
/* /*
...@@ -287,13 +374,12 @@ static void radix_tree_dump(struct radix_tree_root *root)
* that the caller has pinned this thread of control to the current CPU. * that the caller has pinned this thread of control to the current CPU.
*/ */
static struct radix_tree_node * static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root, radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
struct radix_tree_node *parent, struct radix_tree_root *root,
unsigned int shift, unsigned int offset, unsigned int shift, unsigned int offset,
unsigned int count, unsigned int exceptional) unsigned int count, unsigned int exceptional)
{ {
struct radix_tree_node *ret = NULL; struct radix_tree_node *ret = NULL;
gfp_t gfp_mask = root_gfp_mask(root);
/* /*
* Preload code isn't irq safe and it doesn't make sense to use * Preload code isn't irq safe and it doesn't make sense to use
...@@ -321,8 +407,7 @@ radix_tree_node_alloc(struct radix_tree_root *root,
rtp = this_cpu_ptr(&radix_tree_preloads); rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr) { if (rtp->nr) {
ret = rtp->nodes; ret = rtp->nodes;
rtp->nodes = ret->private_data; rtp->nodes = ret->parent;
ret->private_data = NULL;
rtp->nr--; rtp->nr--;
} }
/* /*
...@@ -336,11 +421,12 @@ radix_tree_node_alloc(struct radix_tree_root *root,
out: out:
BUG_ON(radix_tree_is_internal_node(ret)); BUG_ON(radix_tree_is_internal_node(ret));
if (ret) { if (ret) {
ret->parent = parent;
ret->shift = shift; ret->shift = shift;
ret->offset = offset; ret->offset = offset;
ret->count = count; ret->count = count;
ret->exceptional = exceptional; ret->exceptional = exceptional;
ret->parent = parent;
ret->root = root;
} }
return ret; return ret;
} }
...@@ -399,7 +485,7 @@ static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
preempt_disable(); preempt_disable();
rtp = this_cpu_ptr(&radix_tree_preloads); rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < nr) { if (rtp->nr < nr) {
node->private_data = rtp->nodes; node->parent = rtp->nodes;
rtp->nodes = node; rtp->nodes = node;
rtp->nr++; rtp->nr++;
} else { } else {
...@@ -510,7 +596,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
return __radix_tree_preload(gfp_mask, nr_nodes); return __radix_tree_preload(gfp_mask, nr_nodes);
} }
static unsigned radix_tree_load_root(struct radix_tree_root *root, static unsigned radix_tree_load_root(const struct radix_tree_root *root,
struct radix_tree_node **nodep, unsigned long *maxindex) struct radix_tree_node **nodep, unsigned long *maxindex)
{ {
struct radix_tree_node *node = rcu_dereference_raw(root->rnode); struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
...@@ -530,10 +616,10 @@ static unsigned radix_tree_load_root(struct radix_tree_root *root,
/* /*
* Extend a radix tree so it can store key @index. * Extend a radix tree so it can store key @index.
*/ */
static int radix_tree_extend(struct radix_tree_root *root, static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
unsigned long index, unsigned int shift) unsigned long index, unsigned int shift)
{ {
struct radix_tree_node *slot; void *entry;
unsigned int maxshift; unsigned int maxshift;
int tag; int tag;
...@@ -542,32 +628,44 @@ static int radix_tree_extend(struct radix_tree_root *root,
while (index > shift_maxindex(maxshift)) while (index > shift_maxindex(maxshift))
maxshift += RADIX_TREE_MAP_SHIFT; maxshift += RADIX_TREE_MAP_SHIFT;
slot = root->rnode; entry = rcu_dereference_raw(root->rnode);
if (!slot) if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
goto out; goto out;
do { do {
struct radix_tree_node *node = radix_tree_node_alloc(root, struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
NULL, shift, 0, 1, 0); root, shift, 0, 1, 0);
if (!node) if (!node)
return -ENOMEM; return -ENOMEM;
/* Propagate the aggregated tag info into the new root */ if (is_idr(root)) {
all_tag_set(node, IDR_FREE);
if (!root_tag_get(root, IDR_FREE)) {
tag_clear(node, IDR_FREE, 0);
root_tag_set(root, IDR_FREE);
}
} else {
/* Propagate the aggregated tag info to the new child */
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
if (root_tag_get(root, tag)) if (root_tag_get(root, tag))
tag_set(node, tag, 0); tag_set(node, tag, 0);
} }
}
BUG_ON(shift > BITS_PER_LONG); BUG_ON(shift > BITS_PER_LONG);
if (radix_tree_is_internal_node(slot)) { if (radix_tree_is_internal_node(entry)) {
entry_to_node(slot)->parent = node; entry_to_node(entry)->parent = node;
} else if (radix_tree_exceptional_entry(slot)) { } else if (radix_tree_exceptional_entry(entry)) {
/* Moving an exceptional root->rnode to a node */ /* Moving an exceptional root->rnode to a node */
node->exceptional = 1; node->exceptional = 1;
} }
node->slots[0] = slot; /*
slot = node_to_entry(node); * entry was already in the radix tree, so we do not need
rcu_assign_pointer(root->rnode, slot); * rcu_assign_pointer here
*/
node->slots[0] = (void __rcu *)entry;
entry = node_to_entry(node);
rcu_assign_pointer(root->rnode, entry);
shift += RADIX_TREE_MAP_SHIFT; shift += RADIX_TREE_MAP_SHIFT;
} while (shift <= maxshift); } while (shift <= maxshift);
out: out:
...@@ -578,12 +676,14 @@ static int radix_tree_extend(struct radix_tree_root *root,
* radix_tree_shrink - shrink radix tree to minimum height * radix_tree_shrink - shrink radix tree to minimum height
* @root radix tree root * @root radix tree root
*/ */
static inline void radix_tree_shrink(struct radix_tree_root *root, static inline bool radix_tree_shrink(struct radix_tree_root *root,
radix_tree_update_node_t update_node, radix_tree_update_node_t update_node,
void *private) void *private)
{ {
bool shrunk = false;
for (;;) { for (;;) {
struct radix_tree_node *node = root->rnode; struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
struct radix_tree_node *child; struct radix_tree_node *child;
if (!radix_tree_is_internal_node(node)) if (!radix_tree_is_internal_node(node))
...@@ -597,7 +697,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
*/ */
if (node->count != 1) if (node->count != 1)
break; break;
child = node->slots[0]; child = rcu_dereference_raw(node->slots[0]);
if (!child) if (!child)
break; break;
if (!radix_tree_is_internal_node(child) && node->shift) if (!radix_tree_is_internal_node(child) && node->shift)
...@@ -613,7 +713,9 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
* (node->slots[0]), it will be safe to dereference the new * (node->slots[0]), it will be safe to dereference the new
* one (root->rnode) as far as dependent read barriers go. * one (root->rnode) as far as dependent read barriers go.
*/ */
root->rnode = child; root->rnode = (void __rcu *)child;
if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
root_tag_clear(root, IDR_FREE);
/* /*
* We have a dilemma here. The node's slot[0] must not be * We have a dilemma here. The node's slot[0] must not be
...@@ -635,27 +737,34 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
*/ */
node->count = 0; node->count = 0;
if (!radix_tree_is_internal_node(child)) { if (!radix_tree_is_internal_node(child)) {
node->slots[0] = RADIX_TREE_RETRY; node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
if (update_node) if (update_node)
update_node(node, private); update_node(node, private);
} }
WARN_ON_ONCE(!list_empty(&node->private_list)); WARN_ON_ONCE(!list_empty(&node->private_list));
radix_tree_node_free(node); radix_tree_node_free(node);
shrunk = true;
} }
return shrunk;
} }
static void delete_node(struct radix_tree_root *root, static bool delete_node(struct radix_tree_root *root,
struct radix_tree_node *node, struct radix_tree_node *node,
radix_tree_update_node_t update_node, void *private) radix_tree_update_node_t update_node, void *private)
{ {
bool deleted = false;
do { do {
struct radix_tree_node *parent; struct radix_tree_node *parent;
if (node->count) { if (node->count) {
if (node == entry_to_node(root->rnode)) if (node_to_entry(node) ==
radix_tree_shrink(root, update_node, private); rcu_dereference_raw(root->rnode))
return; deleted |= radix_tree_shrink(root, update_node,
private);
return deleted;
} }
parent = node->parent; parent = node->parent;
...@@ -663,15 +772,23 @@ static void delete_node(struct radix_tree_root *root,
parent->slots[node->offset] = NULL; parent->slots[node->offset] = NULL;
parent->count--; parent->count--;
} else { } else {
/*
* Shouldn't the tags already have all been cleared
* by the caller?
*/
if (!is_idr(root))
root_tag_clear_all(root); root_tag_clear_all(root);
root->rnode = NULL; root->rnode = NULL;
} }
WARN_ON_ONCE(!list_empty(&node->private_list)); WARN_ON_ONCE(!list_empty(&node->private_list));
radix_tree_node_free(node); radix_tree_node_free(node);
deleted = true;
node = parent; node = parent;
} while (node); } while (node);
return deleted;
} }
/** /**
...@@ -693,13 +810,14 @@ static void delete_node(struct radix_tree_root *root,
*/ */
int __radix_tree_create(struct radix_tree_root *root, unsigned long index, int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
unsigned order, struct radix_tree_node **nodep, unsigned order, struct radix_tree_node **nodep,
void ***slotp) void __rcu ***slotp)
{ {
struct radix_tree_node *node = NULL, *child; struct radix_tree_node *node = NULL, *child;
void **slot = (void **)&root->rnode; void __rcu **slot = (void __rcu **)&root->rnode;
unsigned long maxindex; unsigned long maxindex;
unsigned int shift, offset = 0; unsigned int shift, offset = 0;
unsigned long max = index | ((1UL << order) - 1); unsigned long max = index | ((1UL << order) - 1);
gfp_t gfp = root_gfp_mask(root);
shift = radix_tree_load_root(root, &child, &maxindex); shift = radix_tree_load_root(root, &child, &maxindex);
...@@ -707,18 +825,18 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
if (order > 0 && max == ((1UL << order) - 1)) if (order > 0 && max == ((1UL << order) - 1))
max++; max++;
if (max > maxindex) { if (max > maxindex) {
int error = radix_tree_extend(root, max, shift); int error = radix_tree_extend(root, gfp, max, shift);
if (error < 0) if (error < 0)
return error; return error;
shift = error; shift = error;
child = root->rnode; child = rcu_dereference_raw(root->rnode);
} }
while (shift > order) { while (shift > order) {
shift -= RADIX_TREE_MAP_SHIFT; shift -= RADIX_TREE_MAP_SHIFT;
if (child == NULL) { if (child == NULL) {
/* Have to add a child node. */ /* Have to add a child node. */
child = radix_tree_node_alloc(root, node, shift, child = radix_tree_node_alloc(gfp, node, root, shift,
offset, 0, 0); offset, 0, 0);
if (!child) if (!child)
return -ENOMEM; return -ENOMEM;
...@@ -741,7 +859,6 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
return 0; return 0;
} }
#ifdef CONFIG_RADIX_TREE_MULTIORDER
/* /*
* Free any nodes below this node. The tree is presumed to not need * Free any nodes below this node. The tree is presumed to not need
* shrinking, and any user data in the tree is presumed to not need a * shrinking, and any user data in the tree is presumed to not need a
...@@ -757,7 +874,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
struct radix_tree_node *child = entry_to_node(node); struct radix_tree_node *child = entry_to_node(node);
for (;;) { for (;;) {
void *entry = child->slots[offset]; void *entry = rcu_dereference_raw(child->slots[offset]);
if (radix_tree_is_internal_node(entry) && if (radix_tree_is_internal_node(entry) &&
!is_sibling_entry(child, entry)) { !is_sibling_entry(child, entry)) {
child = entry_to_node(entry); child = entry_to_node(entry);
...@@ -777,8 +894,9 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
} }
} }
static inline int insert_entries(struct radix_tree_node *node, void **slot, #ifdef CONFIG_RADIX_TREE_MULTIORDER
void *item, unsigned order, bool replace) static inline int insert_entries(struct radix_tree_node *node,
void __rcu **slot, void *item, unsigned order, bool replace)
{ {
struct radix_tree_node *child; struct radix_tree_node *child;
unsigned i, n, tag, offset, tags = 0; unsigned i, n, tag, offset, tags = 0;
...@@ -813,7 +931,7 @@ static inline int insert_entries(struct radix_tree_node *node, void **slot,
} }
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
struct radix_tree_node *old = slot[i]; struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
if (i) { if (i) {
rcu_assign_pointer(slot[i], child); rcu_assign_pointer(slot[i], child);
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
...@@ -840,8 +958,8 @@ static inline int insert_entries(struct radix_tree_node *node, void **slot,
return n; return n;
} }
#else #else
static inline int insert_entries(struct radix_tree_node *node, void **slot, static inline int insert_entries(struct radix_tree_node *node,
void *item, unsigned order, bool replace) void __rcu **slot, void *item, unsigned order, bool replace)
{ {
if (*slot) if (*slot)
return -EEXIST; return -EEXIST;
...@@ -868,7 +986,7 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
unsigned order, void *item) unsigned order, void *item)
{ {
struct radix_tree_node *node; struct radix_tree_node *node;
void **slot; void __rcu **slot;
int error; int error;
BUG_ON(radix_tree_is_internal_node(item)); BUG_ON(radix_tree_is_internal_node(item));
...@@ -908,16 +1026,17 @@ EXPORT_SYMBOL(__radix_tree_insert);
* allocated and @root->rnode is used as a direct slot instead of * allocated and @root->rnode is used as a direct slot instead of
* pointing to a node, in which case *@nodep will be NULL. * pointing to a node, in which case *@nodep will be NULL.
*/ */
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, void *__radix_tree_lookup(const struct radix_tree_root *root,
struct radix_tree_node **nodep, void ***slotp) unsigned long index, struct radix_tree_node **nodep,
void __rcu ***slotp)
{ {
struct radix_tree_node *node, *parent; struct radix_tree_node *node, *parent;
unsigned long maxindex; unsigned long maxindex;
void **slot; void __rcu **slot;
restart: restart:
parent = NULL; parent = NULL;
slot = (void **)&root->rnode; slot = (void __rcu **)&root->rnode;
radix_tree_load_root(root, &node, &maxindex); radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex) if (index > maxindex)
return NULL; return NULL;
...@@ -952,9 +1071,10 @@ void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
* exclusive from other writers. Any dereference of the slot must be done * exclusive from other writers. Any dereference of the slot must be done
* using radix_tree_deref_slot. * using radix_tree_deref_slot.
*/ */
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
unsigned long index)
{ {
void **slot; void __rcu **slot;
if (!__radix_tree_lookup(root, index, NULL, &slot)) if (!__radix_tree_lookup(root, index, NULL, &slot))
return NULL; return NULL;
...@@ -974,75 +1094,76 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
* them safely). No RCU barriers are required to access or modify the * them safely). No RCU barriers are required to access or modify the
* returned item, however. * returned item, however.
*/ */
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
{ {
return __radix_tree_lookup(root, index, NULL, NULL); return __radix_tree_lookup(root, index, NULL, NULL);
} }
EXPORT_SYMBOL(radix_tree_lookup); EXPORT_SYMBOL(radix_tree_lookup);
static inline int slot_count(struct radix_tree_node *node, static inline void replace_sibling_entries(struct radix_tree_node *node,
void **slot) void __rcu **slot, int count, int exceptional)
{ {
int n = 1;
#ifdef CONFIG_RADIX_TREE_MULTIORDER #ifdef CONFIG_RADIX_TREE_MULTIORDER
void *ptr = node_to_entry(slot); void *ptr = node_to_entry(slot);
unsigned offset = get_slot_offset(node, slot); unsigned offset = get_slot_offset(node, slot) + 1;
int i;
for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { while (offset < RADIX_TREE_MAP_SIZE) {
if (node->slots[offset + i] != ptr) if (rcu_dereference_raw(node->slots[offset]) != ptr)
break; break;
n++; if (count < 0) {
node->slots[offset] = NULL;
node->count--;
}
node->exceptional += exceptional;
offset++;
} }
#endif #endif
return n;
} }
static void replace_slot(struct radix_tree_root *root, static void replace_slot(void __rcu **slot, void *item,
struct radix_tree_node *node, struct radix_tree_node *node, int count, int exceptional)
void **slot, void *item,
bool warn_typeswitch)
{ {
void *old = rcu_dereference_raw(*slot); if (WARN_ON_ONCE(radix_tree_is_internal_node(item)))
int count, exceptional; return;
WARN_ON_ONCE(radix_tree_is_internal_node(item));
count = !!item - !!old;
exceptional = !!radix_tree_exceptional_entry(item) -
!!radix_tree_exceptional_entry(old);
WARN_ON_ONCE(warn_typeswitch && (count || exceptional));
if (node) { if (node && (count || exceptional)) {
node->count += count; node->count += count;
if (exceptional) {
exceptional *= slot_count(node, slot);
node->exceptional += exceptional; node->exceptional += exceptional;
} replace_sibling_entries(node, slot, count, exceptional);
} }
rcu_assign_pointer(*slot, item); rcu_assign_pointer(*slot, item);
} }
static inline void delete_sibling_entries(struct radix_tree_node *node, static bool node_tag_get(const struct radix_tree_root *root,
void **slot) const struct radix_tree_node *node,
unsigned int tag, unsigned int offset)
{ {
#ifdef CONFIG_RADIX_TREE_MULTIORDER if (node)
bool exceptional = radix_tree_exceptional_entry(*slot); return tag_get(node, tag, offset);
void *ptr = node_to_entry(slot); return root_tag_get(root, tag);
unsigned offset = get_slot_offset(node, slot); }
int i;
for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { /*
if (node->slots[offset + i] != ptr) * IDR users want to be able to store NULL in the tree, so if the slot isn't
break; * free, don't adjust the count, even if it's transitioning between NULL and
node->slots[offset + i] = NULL; * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still
node->count--; * have empty bits, but it only stores NULL in slots when they're being
if (exceptional) * deleted.
node->exceptional--; */
static int calculate_count(struct radix_tree_root *root,
struct radix_tree_node *node, void __rcu **slot,
void *item, void *old)
{
if (is_idr(root)) {
unsigned offset = get_slot_offset(node, slot);
bool free = node_tag_get(root, node, IDR_FREE, offset);
if (!free)
return 0;
if (!old)
return 1;
} }
#endif return !!item - !!old;
} }
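The consequence of this rule can be sketched as follows (hypothetical IDR, ID and pointer; illustrative only):

	static void null_keeps_id(struct idr *idr, int id, void *ptr)
	{
		idr_replace(idr, NULL, id);	/* slot now holds NULL ... */
		/* ... but the ID stays allocated, so it cannot be reused: */
		WARN_ON(idr_alloc(idr, ptr, id, id + 1, GFP_KERNEL) != -ENOSPC);
		/* only idr_remove(idr, id) returns it to the free pool */
	}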
/** /**
...@@ -1059,18 +1180,22 @@ static inline void delete_sibling_entries(struct radix_tree_node *node,
*/ */
void __radix_tree_replace(struct radix_tree_root *root, void __radix_tree_replace(struct radix_tree_root *root,
struct radix_tree_node *node, struct radix_tree_node *node,
void **slot, void *item, void __rcu **slot, void *item,
radix_tree_update_node_t update_node, void *private) radix_tree_update_node_t update_node, void *private)
{ {
if (!item) void *old = rcu_dereference_raw(*slot);
delete_sibling_entries(node, slot); int exceptional = !!radix_tree_exceptional_entry(item) -
!!radix_tree_exceptional_entry(old);
int count = calculate_count(root, node, slot, item, old);
/* /*
* This function supports replacing exceptional entries and * This function supports replacing exceptional entries and
* deleting entries, but that needs accounting against the * deleting entries, but that needs accounting against the
* node unless the slot is root->rnode. * node unless the slot is root->rnode.
*/ */
replace_slot(root, node, slot, item, WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->rnode) &&
!node && slot != (void **)&root->rnode); (count || exceptional));
replace_slot(slot, item, node, count, exceptional);
if (!node) if (!node)
return; return;
...@@ -1098,9 +1223,9 @@ void __radix_tree_replace(struct radix_tree_root *root,
* radix_tree_iter_replace(). * radix_tree_iter_replace().
*/ */
void radix_tree_replace_slot(struct radix_tree_root *root, void radix_tree_replace_slot(struct radix_tree_root *root,
void **slot, void *item) void __rcu **slot, void *item)
{ {
replace_slot(root, NULL, slot, item, true); __radix_tree_replace(root, NULL, slot, item, NULL, NULL);
} }
EXPORT_SYMBOL(radix_tree_replace_slot); EXPORT_SYMBOL(radix_tree_replace_slot);
...@@ -1114,7 +1239,8 @@ EXPORT_SYMBOL(radix_tree_replace_slot);
* Caller must hold tree write locked across split and replacement. * Caller must hold tree write locked across split and replacement.
*/ */
void radix_tree_iter_replace(struct radix_tree_root *root, void radix_tree_iter_replace(struct radix_tree_root *root,
const struct radix_tree_iter *iter, void **slot, void *item) const struct radix_tree_iter *iter,
void __rcu **slot, void *item)
{ {
__radix_tree_replace(root, iter->node, slot, item, NULL, NULL); __radix_tree_replace(root, iter->node, slot, item, NULL, NULL);
} }
...@@ -1138,7 +1264,7 @@ int radix_tree_join(struct radix_tree_root *root, unsigned long index,
unsigned order, void *item) unsigned order, void *item)
{ {
struct radix_tree_node *node; struct radix_tree_node *node;
void **slot; void __rcu **slot;
int error; int error;
BUG_ON(radix_tree_is_internal_node(item)); BUG_ON(radix_tree_is_internal_node(item));
...@@ -1173,9 +1299,10 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
unsigned order) unsigned order)
{ {
struct radix_tree_node *parent, *node, *child; struct radix_tree_node *parent, *node, *child;
void **slot; void __rcu **slot;
unsigned int offset, end; unsigned int offset, end;
unsigned n, tag, tags = 0; unsigned n, tag, tags = 0;
gfp_t gfp = root_gfp_mask(root);
if (!__radix_tree_lookup(root, index, &parent, &slot)) if (!__radix_tree_lookup(root, index, &parent, &slot))
return -ENOENT; return -ENOENT;
...@@ -1189,7 +1316,8 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
tags |= 1 << tag; tags |= 1 << tag;
for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) { for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
if (!is_sibling_entry(parent, parent->slots[end])) if (!is_sibling_entry(parent,
rcu_dereference_raw(parent->slots[end])))
break; break;
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
if (tags & (1 << tag)) if (tags & (1 << tag))
...@@ -1213,14 +1341,15 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
for (;;) { for (;;) {
if (node->shift > order) { if (node->shift > order) {
child = radix_tree_node_alloc(root, node, child = radix_tree_node_alloc(gfp, node, root,
node->shift - RADIX_TREE_MAP_SHIFT, node->shift - RADIX_TREE_MAP_SHIFT,
offset, 0, 0); offset, 0, 0);
if (!child) if (!child)
goto nomem; goto nomem;
if (node != parent) { if (node != parent) {
node->count++; node->count++;
node->slots[offset] = node_to_entry(child); rcu_assign_pointer(node->slots[offset],
node_to_entry(child));
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
if (tags & (1 << tag)) if (tags & (1 << tag))
tag_set(node, tag, offset); tag_set(node, tag, offset);
...@@ -1262,6 +1391,22 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
} }
#endif #endif
static void node_tag_set(struct radix_tree_root *root,
struct radix_tree_node *node,
unsigned int tag, unsigned int offset)
{
while (node) {
if (tag_get(node, tag, offset))
return;
tag_set(node, tag, offset);
offset = node->offset;
node = node->parent;
}
if (!root_tag_get(root, tag))
root_tag_set(root, tag);
}
/** /**
* radix_tree_tag_set - set a tag on a radix tree node * radix_tree_tag_set - set a tag on a radix tree node
* @root: radix tree root * @root: radix tree root
...@@ -1303,6 +1448,18 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
} }
EXPORT_SYMBOL(radix_tree_tag_set); EXPORT_SYMBOL(radix_tree_tag_set);
/**
* radix_tree_iter_tag_set - set a tag on the current iterator entry
* @root: radix tree root
* @iter: iterator state
* @tag: tag to set
*/
void radix_tree_iter_tag_set(struct radix_tree_root *root,
const struct radix_tree_iter *iter, unsigned int tag)
{
node_tag_set(root, iter->node, tag, iter_offset(iter));
}
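A usage sketch for the new iterator-based helper (the tree, tag number 0 and needs_work() predicate are hypothetical):

	static void tag_matching(struct radix_tree_root *root)
	{
		struct radix_tree_iter iter;
		void __rcu **slot;

		/* Tag matching entries in one pass, with no second lookup. */
		radix_tree_for_each_slot(slot, root, &iter, 0) {
			if (needs_work(rcu_dereference_raw(*slot)))
				radix_tree_iter_tag_set(root, &iter, 0);
		}
	}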
static void node_tag_clear(struct radix_tree_root *root, static void node_tag_clear(struct radix_tree_root *root,
struct radix_tree_node *node, struct radix_tree_node *node,
unsigned int tag, unsigned int offset) unsigned int tag, unsigned int offset)
...@@ -1323,34 +1480,6 @@ static void node_tag_clear(struct radix_tree_root *root, ...@@ -1323,34 +1480,6 @@ static void node_tag_clear(struct radix_tree_root *root,
root_tag_clear(root, tag); root_tag_clear(root, tag);
} }
static void node_tag_set(struct radix_tree_root *root,
struct radix_tree_node *node,
unsigned int tag, unsigned int offset)
{
while (node) {
if (tag_get(node, tag, offset))
return;
tag_set(node, tag, offset);
offset = node->offset;
node = node->parent;
}
if (!root_tag_get(root, tag))
root_tag_set(root, tag);
}
/**
* radix_tree_iter_tag_set - set a tag on the current iterator entry
* @root: radix tree root
* @iter: iterator state
* @tag: tag to set
*/
void radix_tree_iter_tag_set(struct radix_tree_root *root,
const struct radix_tree_iter *iter, unsigned int tag)
{
node_tag_set(root, iter->node, tag, iter_offset(iter));
}
/** /**
* radix_tree_tag_clear - clear a tag on a radix tree node * radix_tree_tag_clear - clear a tag on a radix tree node
* @root: radix tree root * @root: radix tree root
...@@ -1390,6 +1519,18 @@ void *radix_tree_tag_clear(struct radix_tree_root *root, ...@@ -1390,6 +1519,18 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
} }
EXPORT_SYMBOL(radix_tree_tag_clear); EXPORT_SYMBOL(radix_tree_tag_clear);
/**
* radix_tree_iter_tag_clear - clear a tag on the current iterator entry
* @root: radix tree root
* @iter: iterator state
* @tag: tag to clear
*/
void radix_tree_iter_tag_clear(struct radix_tree_root *root,
const struct radix_tree_iter *iter, unsigned int tag)
{
node_tag_clear(root, iter->node, tag, iter_offset(iter));
}
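
A complementary sketch (again hypothetical, not from this diff): walk only the tagged entries and drop the tag once each one has been handled; process() is an assumed callback:

static void process_tagged(struct radix_tree_root *root, unsigned int tag,
                           void (*process)(void *))
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        /* Visit tagged slots only, clearing the tag as we go. */
        radix_tree_for_each_tagged(slot, root, &iter, 0, tag) {
                process(radix_tree_deref_slot(slot));
                radix_tree_iter_tag_clear(root, &iter, tag);
        }
}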
/** /**
* radix_tree_tag_get - get a tag on a radix tree node * radix_tree_tag_get - get a tag on a radix tree node
* @root: radix tree root * @root: radix tree root
...@@ -1405,7 +1546,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear); ...@@ -1405,7 +1546,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
* the RCU lock is held, unless tag modification and node deletion are excluded * the RCU lock is held, unless tag modification and node deletion are excluded
* from concurrency. * from concurrency.
*/ */
int radix_tree_tag_get(struct radix_tree_root *root, int radix_tree_tag_get(const struct radix_tree_root *root,
unsigned long index, unsigned int tag) unsigned long index, unsigned int tag)
{ {
struct radix_tree_node *node, *parent; struct radix_tree_node *node, *parent;
...@@ -1417,8 +1558,6 @@ int radix_tree_tag_get(struct radix_tree_root *root, ...@@ -1417,8 +1558,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
radix_tree_load_root(root, &node, &maxindex); radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex) if (index > maxindex)
return 0; return 0;
if (node == NULL)
return 0;
while (radix_tree_is_internal_node(node)) { while (radix_tree_is_internal_node(node)) {
unsigned offset; unsigned offset;
...@@ -1426,8 +1565,6 @@ int radix_tree_tag_get(struct radix_tree_root *root, ...@@ -1426,8 +1565,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
parent = entry_to_node(node); parent = entry_to_node(node);
offset = radix_tree_descend(parent, &node, index); offset = radix_tree_descend(parent, &node, index);
if (!node)
return 0;
if (!tag_get(parent, tag, offset)) if (!tag_get(parent, tag, offset))
return 0; return 0;
if (node == RADIX_TREE_RETRY) if (node == RADIX_TREE_RETRY)
...@@ -1454,6 +1591,11 @@ static void set_iter_tags(struct radix_tree_iter *iter, ...@@ -1454,6 +1591,11 @@ static void set_iter_tags(struct radix_tree_iter *iter,
unsigned tag_long = offset / BITS_PER_LONG; unsigned tag_long = offset / BITS_PER_LONG;
unsigned tag_bit = offset % BITS_PER_LONG; unsigned tag_bit = offset % BITS_PER_LONG;
if (!node) {
iter->tags = 1;
return;
}
iter->tags = node->tags[tag][tag_long] >> tag_bit; iter->tags = node->tags[tag][tag_long] >> tag_bit;
/* This never happens if RADIX_TREE_TAG_LONGS == 1 */ /* This never happens if RADIX_TREE_TAG_LONGS == 1 */
...@@ -1468,8 +1610,8 @@ static void set_iter_tags(struct radix_tree_iter *iter, ...@@ -1468,8 +1610,8 @@ static void set_iter_tags(struct radix_tree_iter *iter,
} }
#ifdef CONFIG_RADIX_TREE_MULTIORDER #ifdef CONFIG_RADIX_TREE_MULTIORDER
static void **skip_siblings(struct radix_tree_node **nodep, static void __rcu **skip_siblings(struct radix_tree_node **nodep,
void **slot, struct radix_tree_iter *iter) void __rcu **slot, struct radix_tree_iter *iter)
{ {
void *sib = node_to_entry(slot - 1); void *sib = node_to_entry(slot - 1);
...@@ -1486,8 +1628,8 @@ static void **skip_siblings(struct radix_tree_node **nodep, ...@@ -1486,8 +1628,8 @@ static void **skip_siblings(struct radix_tree_node **nodep,
return NULL; return NULL;
} }
void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, void __rcu **__radix_tree_next_slot(void __rcu **slot,
unsigned flags) struct radix_tree_iter *iter, unsigned flags)
{ {
unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
struct radix_tree_node *node = rcu_dereference_raw(*slot); struct radix_tree_node *node = rcu_dereference_raw(*slot);
...@@ -1540,20 +1682,20 @@ void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, ...@@ -1540,20 +1682,20 @@ void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
} }
EXPORT_SYMBOL(__radix_tree_next_slot); EXPORT_SYMBOL(__radix_tree_next_slot);
#else #else
static void **skip_siblings(struct radix_tree_node **nodep, static void __rcu **skip_siblings(struct radix_tree_node **nodep,
void **slot, struct radix_tree_iter *iter) void __rcu **slot, struct radix_tree_iter *iter)
{ {
return slot; return slot;
} }
#endif #endif
void **radix_tree_iter_resume(void **slot, struct radix_tree_iter *iter) void __rcu **radix_tree_iter_resume(void __rcu **slot,
struct radix_tree_iter *iter)
{ {
struct radix_tree_node *node; struct radix_tree_node *node;
slot++; slot++;
iter->index = __radix_tree_iter_add(iter, 1); iter->index = __radix_tree_iter_add(iter, 1);
node = rcu_dereference_raw(*slot);
skip_siblings(&node, slot, iter); skip_siblings(&node, slot, iter);
iter->next_index = iter->index; iter->next_index = iter->index;
iter->tags = 0; iter->tags = 0;
...@@ -1569,7 +1711,7 @@ EXPORT_SYMBOL(radix_tree_iter_resume); ...@@ -1569,7 +1711,7 @@ EXPORT_SYMBOL(radix_tree_iter_resume);
* @flags: RADIX_TREE_ITER_* flags and tag index * @flags: RADIX_TREE_ITER_* flags and tag index
* Returns: pointer to chunk first slot, or NULL if iteration is over * Returns: pointer to chunk first slot, or NULL if iteration is over
*/ */
void **radix_tree_next_chunk(struct radix_tree_root *root, void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned flags) struct radix_tree_iter *iter, unsigned flags)
{ {
unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
...@@ -1606,7 +1748,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, ...@@ -1606,7 +1748,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
iter->tags = 1; iter->tags = 1;
iter->node = NULL; iter->node = NULL;
__set_iter_shift(iter, 0); __set_iter_shift(iter, 0);
return (void **)&root->rnode; return (void __rcu **)&root->rnode;
} }
do { do {
...@@ -1624,7 +1766,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, ...@@ -1624,7 +1766,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
offset + 1); offset + 1);
else else
while (++offset < RADIX_TREE_MAP_SIZE) { while (++offset < RADIX_TREE_MAP_SIZE) {
void *slot = node->slots[offset]; void *slot = rcu_dereference_raw(
node->slots[offset]);
if (is_sibling_entry(node, slot)) if (is_sibling_entry(node, slot))
continue; continue;
if (slot) if (slot)
...@@ -1680,11 +1823,11 @@ EXPORT_SYMBOL(radix_tree_next_chunk); ...@@ -1680,11 +1823,11 @@ EXPORT_SYMBOL(radix_tree_next_chunk);
* stored in 'results'. * stored in 'results'.
*/ */
unsigned int unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results, radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items) unsigned long first_index, unsigned int max_items)
{ {
struct radix_tree_iter iter; struct radix_tree_iter iter;
void **slot; void __rcu **slot;
unsigned int ret = 0; unsigned int ret = 0;
if (unlikely(!max_items)) if (unlikely(!max_items))
...@@ -1725,12 +1868,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup); ...@@ -1725,12 +1868,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
* protection, radix_tree_deref_slot may fail requiring a retry. * protection, radix_tree_deref_slot may fail requiring a retry.
*/ */
unsigned int unsigned int
radix_tree_gang_lookup_slot(struct radix_tree_root *root, radix_tree_gang_lookup_slot(const struct radix_tree_root *root,
void ***results, unsigned long *indices, void __rcu ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items) unsigned long first_index, unsigned int max_items)
{ {
struct radix_tree_iter iter; struct radix_tree_iter iter;
void **slot; void __rcu **slot;
unsigned int ret = 0; unsigned int ret = 0;
if (unlikely(!max_items)) if (unlikely(!max_items))
...@@ -1762,12 +1905,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_slot); ...@@ -1762,12 +1905,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
* returns the number of items which were placed at *@results. * returns the number of items which were placed at *@results.
*/ */
unsigned int unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items, unsigned long first_index, unsigned int max_items,
unsigned int tag) unsigned int tag)
{ {
struct radix_tree_iter iter; struct radix_tree_iter iter;
void **slot; void __rcu **slot;
unsigned int ret = 0; unsigned int ret = 0;
if (unlikely(!max_items)) if (unlikely(!max_items))
...@@ -1803,12 +1946,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag); ...@@ -1803,12 +1946,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
* returns the number of slots which were placed at *@results. * returns the number of slots which were placed at *@results.
*/ */
unsigned int unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
unsigned long first_index, unsigned int max_items, void __rcu ***results, unsigned long first_index,
unsigned int tag) unsigned int max_items, unsigned int tag)
{ {
struct radix_tree_iter iter; struct radix_tree_iter iter;
void **slot; void __rcu **slot;
unsigned int ret = 0; unsigned int ret = 0;
if (unlikely(!max_items)) if (unlikely(!max_items))
...@@ -1843,6 +1986,43 @@ void __radix_tree_delete_node(struct radix_tree_root *root, ...@@ -1843,6 +1986,43 @@ void __radix_tree_delete_node(struct radix_tree_root *root,
delete_node(root, node, update_node, private); delete_node(root, node, update_node, private);
} }
static bool __radix_tree_delete(struct radix_tree_root *root,
struct radix_tree_node *node, void __rcu **slot)
{
void *old = rcu_dereference_raw(*slot);
int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0;
unsigned offset = get_slot_offset(node, slot);
int tag;
if (is_idr(root))
node_tag_set(root, node, IDR_FREE, offset);
else
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
node_tag_clear(root, node, tag, offset);
replace_slot(slot, NULL, node, -1, exceptional);
return node && delete_node(root, node, NULL, NULL);
}
/**
* radix_tree_iter_delete - delete the entry at this iterator position
* @root: radix tree root
* @iter: iterator state
* @slot: pointer to slot
*
* Delete the entry at the position currently pointed to by the iterator.
* This may result in the current node being freed; if it is, the iterator
* is advanced so that it will not reference the freed memory. This
* function may be called without any locking if there are no other threads
* which can access this tree.
*/
void radix_tree_iter_delete(struct radix_tree_root *root,
struct radix_tree_iter *iter, void __rcu **slot)
{
if (__radix_tree_delete(root, iter->node, slot))
iter->index = iter->next_index;
}
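
Because the iterator is advanced past any node the deletion frees, entries can be removed mid-walk without restarting. A sketch of that use (not part of this diff; match() is a hypothetical predicate and the caller is assumed to hold the tree's lock):

static void delete_matching(struct radix_tree_root *root, bool (*match)(void *))
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        /* Safe to delete while iterating; see the comment above. */
        radix_tree_for_each_slot(slot, root, &iter, 0)
                if (match(radix_tree_deref_slot(slot)))
                        radix_tree_iter_delete(root, &iter, slot);
}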
/** /**
* radix_tree_delete_item - delete an item from a radix tree * radix_tree_delete_item - delete an item from a radix tree
* @root: radix tree root * @root: radix tree root
...@@ -1851,51 +2031,38 @@ void __radix_tree_delete_node(struct radix_tree_root *root, ...@@ -1851,51 +2031,38 @@ void __radix_tree_delete_node(struct radix_tree_root *root,
* *
* Remove @item at @index from the radix tree rooted at @root. * Remove @item at @index from the radix tree rooted at @root.
* *
* Returns the address of the deleted item, or NULL if it was not present * Return: the deleted entry, or %NULL if it was not present
* or the entry at the given @index was not @item. * or the entry at the given @index was not @item.
*/ */
void *radix_tree_delete_item(struct radix_tree_root *root, void *radix_tree_delete_item(struct radix_tree_root *root,
unsigned long index, void *item) unsigned long index, void *item)
{ {
struct radix_tree_node *node; struct radix_tree_node *node = NULL;
unsigned int offset; void __rcu **slot;
void **slot;
void *entry; void *entry;
int tag;
entry = __radix_tree_lookup(root, index, &node, &slot); entry = __radix_tree_lookup(root, index, &node, &slot);
if (!entry) if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
get_slot_offset(node, slot))))
return NULL; return NULL;
if (item && entry != item) if (item && entry != item)
return NULL; return NULL;
if (!node) { __radix_tree_delete(root, node, slot);
root_tag_clear_all(root);
root->rnode = NULL;
return entry;
}
offset = get_slot_offset(node, slot);
/* Clear all tags associated with the item to be deleted. */
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
node_tag_clear(root, node, tag, offset);
__radix_tree_replace(root, node, slot, NULL, NULL, NULL);
return entry; return entry;
} }
EXPORT_SYMBOL(radix_tree_delete_item); EXPORT_SYMBOL(radix_tree_delete_item);
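
Since the entry is only removed when it still matches @item, the function supports a compare-and-delete pattern; a hypothetical sketch (not from this diff):

static int try_delete(struct radix_tree_root *root, unsigned long index,
                      void *item)
{
        /* NULL here means the slot was empty or no longer held item. */
        if (radix_tree_delete_item(root, index, item) != item)
                return -ENOENT;
        kfree(item);
        return 0;
}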
/** /**
* radix_tree_delete - delete an item from a radix tree * radix_tree_delete - delete an entry from a radix tree
* @root: radix tree root * @root: radix tree root
* @index: index key * @index: index key
* *
* Remove the item at @index from the radix tree rooted at @root. * Remove the entry at @index from the radix tree rooted at @root.
* *
* Returns the address of the deleted item, or NULL if it was not present. * Return: The deleted entry, or %NULL if it was not present.
*/ */
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{ {
...@@ -1905,15 +2072,14 @@ EXPORT_SYMBOL(radix_tree_delete); ...@@ -1905,15 +2072,14 @@ EXPORT_SYMBOL(radix_tree_delete);
void radix_tree_clear_tags(struct radix_tree_root *root, void radix_tree_clear_tags(struct radix_tree_root *root,
struct radix_tree_node *node, struct radix_tree_node *node,
void **slot) void __rcu **slot)
{ {
if (node) { if (node) {
unsigned int tag, offset = get_slot_offset(node, slot); unsigned int tag, offset = get_slot_offset(node, slot);
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
node_tag_clear(root, node, tag, offset); node_tag_clear(root, node, tag, offset);
} else { } else {
/* Clear root node tags */ root_tag_clear_all(root);
root->gfp_mask &= __GFP_BITS_MASK;
} }
} }
...@@ -1922,12 +2088,147 @@ void radix_tree_clear_tags(struct radix_tree_root *root, ...@@ -1922,12 +2088,147 @@ void radix_tree_clear_tags(struct radix_tree_root *root,
* @root: radix tree root * @root: radix tree root
* @tag: tag to test * @tag: tag to test
*/ */
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
{ {
return root_tag_get(root, tag); return root_tag_get(root, tag);
} }
EXPORT_SYMBOL(radix_tree_tagged); EXPORT_SYMBOL(radix_tree_tagged);
/**
* idr_preload - preload for idr_alloc()
* @gfp_mask: allocation mask to use for preloading
*
* Preallocate memory to use for the next call to idr_alloc(). This function
* returns with preemption disabled. It will be enabled by idr_preload_end().
*/
void idr_preload(gfp_t gfp_mask)
{
__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE);
}
EXPORT_SYMBOL(idr_preload);
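
The comment above implies the usual pattern: preallocate with a blocking GFP outside the lock, then allocate with GFP_NOWAIT inside it. A hypothetical sketch (lock, idr and ptr are assumptions, not from this diff):

static int alloc_id(struct idr *idr, spinlock_t *lock, void *ptr)
{
        int id;

        idr_preload(GFP_KERNEL);        /* may sleep; disables preemption */
        spin_lock(lock);
        id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);
        spin_unlock(lock);
        idr_preload_end();              /* re-enables preemption */

        return id;                      /* new ID, or a negative errno */
}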
/**
* ida_pre_get - reserve resources for ida allocation
* @ida: ida handle
* @gfp: memory allocation flags
*
* This function should be called before calling ida_get_new_above(). If it
* is unable to allocate memory, it will return %0. On success, it returns %1.
*/
int ida_pre_get(struct ida *ida, gfp_t gfp)
{
__radix_tree_preload(gfp, IDA_PRELOAD_SIZE);
/*
* The IDA API has no preload_end() equivalent. Instead,
* ida_get_new() can return -EAGAIN, prompting the caller
* to return to the ida_pre_get() step.
*/
preempt_enable();
if (!this_cpu_read(ida_bitmap)) {
struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
if (!bitmap)
return 0;
bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
kfree(bitmap);
}
return 1;
}
EXPORT_SYMBOL(ida_pre_get);
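
The -EAGAIN convention described in the comment above leads to the classic retry loop; a hypothetical sketch (not from this diff):

static int alloc_ida_id(struct ida *ida, int *id)
{
        int err;

        do {
                if (!ida_pre_get(ida, GFP_KERNEL))
                        return -ENOMEM;
                err = ida_get_new(ida, id);     /* -EAGAIN: preload again */
        } while (err == -EAGAIN);

        return err;
}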
void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp, int end)
{
struct radix_tree_node *node = NULL, *child;
void __rcu **slot = (void __rcu **)&root->rnode;
unsigned long maxindex, start = iter->next_index;
unsigned long max = end > 0 ? end - 1 : INT_MAX;
unsigned int shift, offset = 0;
grow:
shift = radix_tree_load_root(root, &child, &maxindex);
if (!radix_tree_tagged(root, IDR_FREE))
start = max(start, maxindex + 1);
if (start > max)
return ERR_PTR(-ENOSPC);
if (start > maxindex) {
int error = radix_tree_extend(root, gfp, start, shift);
if (error < 0)
return ERR_PTR(error);
shift = error;
child = rcu_dereference_raw(root->rnode);
}
while (shift) {
shift -= RADIX_TREE_MAP_SHIFT;
if (child == NULL) {
/* Have to add a child node. */
child = radix_tree_node_alloc(gfp, node, root, shift,
offset, 0, 0);
if (!child)
return ERR_PTR(-ENOMEM);
all_tag_set(child, IDR_FREE);
rcu_assign_pointer(*slot, node_to_entry(child));
if (node)
node->count++;
} else if (!radix_tree_is_internal_node(child))
break;
node = entry_to_node(child);
offset = radix_tree_descend(node, &child, start);
if (!tag_get(node, IDR_FREE, offset)) {
offset = radix_tree_find_next_bit(node, IDR_FREE,
offset + 1);
start = next_index(start, node, offset);
if (start > max)
return ERR_PTR(-ENOSPC);
while (offset == RADIX_TREE_MAP_SIZE) {
offset = node->offset + 1;
node = node->parent;
if (!node)
goto grow;
shift = node->shift;
}
child = rcu_dereference_raw(node->slots[offset]);
}
slot = &node->slots[offset];
}
iter->index = start;
if (node)
iter->next_index = 1 + min(max, (start | node_maxindex(node)));
else
iter->next_index = 1;
iter->node = node;
__set_iter_shift(iter, shift);
set_iter_tags(iter, node, offset, IDR_FREE);
return slot;
}
/**
* idr_destroy - release all internal memory from an IDR
* @idr: idr handle
*
* After this function is called, the IDR is empty, and may be reused or
* the data structure containing it may be freed.
*
* A typical clean-up sequence for objects stored in an idr tree will use
* idr_for_each() to free all objects, if necessary, then idr_destroy() to
* free the memory used to keep track of those objects.
*/
void idr_destroy(struct idr *idr)
{
struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode);
if (radix_tree_is_internal_node(node))
radix_tree_free_nodes(node);
idr->idr_rt.rnode = NULL;
root_tag_set(&idr->idr_rt, IDR_FREE);
}
EXPORT_SYMBOL(idr_destroy);
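
A hypothetical sketch of the clean-up sequence the comment above recommends (free_entry() and the kfree()d payload are assumptions, not from this diff):

static int free_entry(int id, void *p, void *data)
{
        kfree(p);
        return 0;
}

static void release_idr(struct idr *idr)
{
        idr_for_each(idr, free_entry, NULL);    /* free the objects */
        idr_destroy(idr);                       /* free the tree itself */
}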
static void static void
radix_tree_node_ctor(void *arg) radix_tree_node_ctor(void *arg)
{ {
...@@ -1971,10 +2272,12 @@ static int radix_tree_cpu_dead(unsigned int cpu) ...@@ -1971,10 +2272,12 @@ static int radix_tree_cpu_dead(unsigned int cpu)
rtp = &per_cpu(radix_tree_preloads, cpu); rtp = &per_cpu(radix_tree_preloads, cpu);
while (rtp->nr) { while (rtp->nr) {
node = rtp->nodes; node = rtp->nodes;
rtp->nodes = node->private_data; rtp->nodes = node->parent;
kmem_cache_free(radix_tree_node_cachep, node); kmem_cache_free(radix_tree_node_cachep, node);
rtp->nr--; rtp->nr--;
} }
kfree(per_cpu(ida_bitmap, cpu));
per_cpu(ida_bitmap, cpu) = NULL;
return 0; return 0;
} }
......
...@@ -355,10 +355,8 @@ void workingset_update_node(struct radix_tree_node *node, void *private) ...@@ -355,10 +355,8 @@ void workingset_update_node(struct radix_tree_node *node, void *private)
* as node->private_list is protected by &mapping->tree_lock. * as node->private_list is protected by &mapping->tree_lock.
*/ */
if (node->count && node->count == node->exceptional) { if (node->count && node->count == node->exceptional) {
if (list_empty(&node->private_list)) { if (list_empty(&node->private_list))
node->private_data = mapping;
list_lru_add(&shadow_nodes, &node->private_list); list_lru_add(&shadow_nodes, &node->private_list);
}
} else { } else {
if (!list_empty(&node->private_list)) if (!list_empty(&node->private_list))
list_lru_del(&shadow_nodes, &node->private_list); list_lru_del(&shadow_nodes, &node->private_list);
...@@ -436,7 +434,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item, ...@@ -436,7 +434,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
*/ */
node = container_of(item, struct radix_tree_node, private_list); node = container_of(item, struct radix_tree_node, private_list);
mapping = node->private_data; mapping = container_of(node->root, struct address_space, page_tree);
/* Coming from the list, invert the lock order */ /* Coming from the list, invert the lock order */
if (!spin_trylock(&mapping->tree_lock)) { if (!spin_trylock(&mapping->tree_lock)) {
......
...@@ -462,9 +462,7 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local, ...@@ -462,9 +462,7 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&local->ack_status_lock, flags); spin_lock_irqsave(&local->ack_status_lock, flags);
skb = idr_find(&local->ack_status_frames, info->ack_frame_id); skb = idr_remove(&local->ack_status_frames, info->ack_frame_id);
if (skb)
idr_remove(&local->ack_status_frames, info->ack_frame_id);
spin_unlock_irqrestore(&local->ack_status_lock, flags); spin_unlock_irqrestore(&local->ack_status_lock, flags);
if (!skb) if (!skb)
......
...@@ -20,4 +20,7 @@ static __always_inline int test_bit(unsigned int nr, const unsigned long *addr) ...@@ -20,4 +20,7 @@ static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
(((unsigned long *)addr)[nr / __BITS_PER_LONG])) != 0; (((unsigned long *)addr)[nr / __BITS_PER_LONG])) != 0;
} }
#define __set_bit(nr, addr) set_bit(nr, addr)
#define __clear_bit(nr, addr) clear_bit(nr, addr)
#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_ */ #endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_ */
...@@ -12,6 +12,14 @@ ...@@ -12,6 +12,14 @@
unlikely(__ret_warn_on); \ unlikely(__ret_warn_on); \
}) })
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN_printf("assertion failed at %s:%d\n", \
__FILE__, __LINE__); \
unlikely(__ret_warn_on); \
})
#define WARN_ON_ONCE(condition) ({ \ #define WARN_ON_ONCE(condition) ({ \
static int __warned; \ static int __warned; \
int __ret_warn_once = !!(condition); \ int __ret_warn_once = !!(condition); \
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <string.h> #include <string.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <stdlib.h> #include <stdlib.h>
#include <linux/kernel.h>
#define DECLARE_BITMAP(name,bits) \ #define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)] unsigned long name[BITS_TO_LONGS(bits)]
......
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
#define _TOOLS_LINUX_BITOPS_H_ #define _TOOLS_LINUX_BITOPS_H_
#include <asm/types.h> #include <asm/types.h>
#include <linux/kernel.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#ifndef __WORDSIZE #ifndef __WORDSIZE
......
...@@ -25,6 +25,8 @@ ...@@ -25,6 +25,8 @@
#endif #endif
#define __user #define __user
#define __rcu
#define __read_mostly
#ifndef __attribute_const__ #ifndef __attribute_const__
# define __attribute_const__ # define __attribute_const__
...@@ -54,6 +56,8 @@ ...@@ -54,6 +56,8 @@
# define unlikely(x) __builtin_expect(!!(x), 0) # define unlikely(x) __builtin_expect(!!(x), 0)
#endif #endif
#define uninitialized_var(x) x = *(&(x))
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
#include <linux/types.h> #include <linux/types.h>
......
#define spinlock_t pthread_mutex_t
#define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
#define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x)
#define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x)
generated/map-shift.h
idr.c
idr-test
main main
multiorder
radix-tree.c radix-tree.c
CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address
LDFLAGS += -lpthread -lurcu LDFLAGS += -lpthread -lurcu
TARGETS = main TARGETS = main idr-test multiorder
OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \ CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o
regression1.o regression2.o regression3.o multiorder.o \ OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
iteration_check.o benchmark.o tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
ifdef BENCHMARK ifndef SHIFT
CFLAGS += -DBENCHMARK=1 SHIFT=3
endif endif
targets: $(TARGETS) targets: mapshift $(TARGETS)
main: $(OFILES) main: $(OFILES)
$(CC) $(CFLAGS) $(LDFLAGS) $(OFILES) -o main $(CC) $(CFLAGS) $(LDFLAGS) $^ -o main
idr-test: idr-test.o $(CORE_OFILES)
$(CC) $(CFLAGS) $(LDFLAGS) $^ -o idr-test
multiorder: multiorder.o $(CORE_OFILES)
$(CC) $(CFLAGS) $(LDFLAGS) $^ -o multiorder
clean: clean:
$(RM) -f $(TARGETS) *.o radix-tree.c $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h
find_next_bit.o: ../../lib/find_bit.c vpath %.c ../../lib
$(CC) $(CFLAGS) -c -o $@ $<
$(OFILES): *.h */*.h \ $(OFILES): *.h */*.h generated/map-shift.h \
../../include/linux/*.h \ ../../include/linux/*.h \
../../../include/linux/radix-tree.h ../../include/asm/*.h \
../../../include/linux/radix-tree.h \
../../../include/linux/idr.h
radix-tree.c: ../../../lib/radix-tree.c radix-tree.c: ../../../lib/radix-tree.c
sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@ sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
idr.c: ../../../lib/idr.c
sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
.PHONY: mapshift
mapshift:
@if ! grep -qw $(SHIFT) generated/map-shift.h; then \
echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \
generated/map-shift.h; \
fi
...@@ -71,7 +71,7 @@ static void benchmark_size(unsigned long size, unsigned long step, int order) ...@@ -71,7 +71,7 @@ static void benchmark_size(unsigned long size, unsigned long step, int order)
tagged = benchmark_iter(&tree, true); tagged = benchmark_iter(&tree, true);
normal = benchmark_iter(&tree, false); normal = benchmark_iter(&tree, false);
printf("Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n", printv(2, "Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n",
size, step, order, tagged, normal); size, step, order, tagged, normal);
item_kill_tree(&tree); item_kill_tree(&tree);
...@@ -85,8 +85,8 @@ void benchmark(void) ...@@ -85,8 +85,8 @@ void benchmark(void)
128, 256, 512, 12345, 0}; 128, 256, 512, 12345, 0};
int c, s; int c, s;
printf("starting benchmarks\n"); printv(1, "starting benchmarks\n");
printf("RADIX_TREE_MAP_SHIFT = %d\n", RADIX_TREE_MAP_SHIFT); printv(1, "RADIX_TREE_MAP_SHIFT = %d\n", RADIX_TREE_MAP_SHIFT);
for (c = 0; size[c]; c++) for (c = 0; size[c]; c++)
for (s = 0; step[s]; s++) for (s = 0; step[s]; s++)
......
#define CONFIG_RADIX_TREE_MULTIORDER 1 #define CONFIG_RADIX_TREE_MULTIORDER 1
#define CONFIG_SHMEM 1
#define CONFIG_SWAP 1
/*
* idr-test.c: Test the IDR API
* Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/bitmap.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include "test.h"
#define DUMMY_PTR ((void *)0x12)
int item_idr_free(int id, void *p, void *data)
{
struct item *item = p;
assert(item->index == id);
free(p);
return 0;
}
void item_idr_remove(struct idr *idr, int id)
{
struct item *item = idr_find(idr, id);
assert(item->index == id);
idr_remove(idr, id);
free(item);
}
void idr_alloc_test(void)
{
unsigned long i;
DEFINE_IDR(idr);
assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
idr_remove(&idr, 0x3ffd);
idr_remove(&idr, 0);
for (i = 0x3ffe; i < 0x4003; i++) {
int id;
struct item *item;
if (i < 0x4000)
item = item_create(i, 0);
else
item = item_create(i - 0x3fff, 0);
id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
assert(id == item->index);
}
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
}
void idr_replace_test(void)
{
DEFINE_IDR(idr);
idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL);
idr_replace(&idr, &idr, 10);
idr_destroy(&idr);
}
/*
* Unlike the radix tree, you can put a NULL pointer -- with care -- into
* the IDR. Some interfaces, like idr_find(), do not distinguish between
* "present, value is NULL" and "not present", but that's exactly what some
* users want.
*/
void idr_null_test(void)
{
int i;
DEFINE_IDR(idr);
assert(idr_is_empty(&idr));
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
assert(!idr_is_empty(&idr));
idr_remove(&idr, 0);
assert(idr_is_empty(&idr));
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
assert(!idr_is_empty(&idr));
idr_destroy(&idr);
assert(idr_is_empty(&idr));
for (i = 0; i < 10; i++) {
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
}
assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL);
assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL);
assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR);
assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT));
idr_remove(&idr, 5);
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
idr_remove(&idr, 5);
for (i = 0; i < 9; i++) {
idr_remove(&idr, i);
assert(!idr_is_empty(&idr));
}
idr_remove(&idr, 8);
assert(!idr_is_empty(&idr));
idr_remove(&idr, 9);
assert(idr_is_empty(&idr));
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT));
assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL);
assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR);
idr_destroy(&idr);
assert(idr_is_empty(&idr));
for (i = 1; i < 10; i++) {
assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
}
idr_destroy(&idr);
assert(idr_is_empty(&idr));
}
void idr_nowait_test(void)
{
unsigned int i;
DEFINE_IDR(idr);
idr_preload(GFP_KERNEL);
for (i = 0; i < 3; i++) {
struct item *item = item_create(i, 0);
assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
}
idr_preload_end();
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
}
void idr_checks(void)
{
unsigned long i;
DEFINE_IDR(idr);
for (i = 0; i < 10000; i++) {
struct item *item = item_create(i, 0);
assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
}
assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);
for (i = 0; i < 5000; i++)
item_idr_remove(&idr, i);
idr_remove(&idr, 3);
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
assert(idr_is_empty(&idr));
idr_remove(&idr, 3);
idr_remove(&idr, 0);
for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
struct item *item = item_create(i, 0);
assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
}
assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
idr_destroy(&idr);
assert(idr_is_empty(&idr));
for (i = 1; i < 10000; i++) {
struct item *item = item_create(i, 0);
assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
}
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
idr_replace_test();
idr_alloc_test();
idr_null_test();
idr_nowait_test();
}
/*
* Check that we get the correct error when we run out of memory doing
* allocations. To ensure we run out of memory, just "forget" to preload.
* The first test is for not having a bitmap available, and the second test
* is for not being able to allocate a level of the radix tree.
*/
void ida_check_nomem(void)
{
DEFINE_IDA(ida);
int id, err;
err = ida_get_new_above(&ida, 256, &id);
assert(err == -EAGAIN);
err = ida_get_new_above(&ida, 1UL << 30, &id);
assert(err == -EAGAIN);
}
/*
* Check what happens when we fill a leaf and then delete it. This may
* discover mishandling of IDR_FREE.
*/
void ida_check_leaf(void)
{
DEFINE_IDA(ida);
int id;
unsigned long i;
for (i = 0; i < IDA_BITMAP_BITS; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
assert(id == i);
}
ida_destroy(&ida);
assert(ida_is_empty(&ida));
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
assert(id == 0);
ida_destroy(&ida);
assert(ida_is_empty(&ida));
}
/*
* Check handling of conversions between exceptional entries and full bitmaps.
*/
void ida_check_conv(void)
{
DEFINE_IDA(ida);
int id;
unsigned long i;
for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, i + 1, &id));
assert(id == i + 1);
assert(!ida_get_new_above(&ida, i + BITS_PER_LONG, &id));
assert(id == i + BITS_PER_LONG);
ida_remove(&ida, i + 1);
ida_remove(&ida, i + BITS_PER_LONG);
assert(ida_is_empty(&ida));
}
assert(ida_pre_get(&ida, GFP_KERNEL));
for (i = 0; i < IDA_BITMAP_BITS * 2; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
assert(id == i);
}
for (i = IDA_BITMAP_BITS * 2; i > 0; i--) {
ida_remove(&ida, i - 1);
}
assert(ida_is_empty(&ida));
for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
assert(id == i);
}
for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--) {
ida_remove(&ida, i - 1);
}
assert(ida_is_empty(&ida));
radix_tree_cpu_dead(1);
for (i = 0; i < 1000000; i++) {
int err = ida_get_new(&ida, &id);
if (err == -EAGAIN) {
assert((i % IDA_BITMAP_BITS) == (BITS_PER_LONG - 2));
assert(ida_pre_get(&ida, GFP_KERNEL));
err = ida_get_new(&ida, &id);
} else {
assert((i % IDA_BITMAP_BITS) != (BITS_PER_LONG - 2));
}
assert(!err);
assert(id == i);
}
ida_destroy(&ida);
}
/*
* Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
* Allocating up to 2^31-1 should succeed, and then allocating the next one
* should fail.
*/
void ida_check_max(void)
{
DEFINE_IDA(ida);
int id, err;
unsigned long i, j;
for (j = 1; j < 65537; j *= 2) {
unsigned long base = (1UL << 31) - j;
for (i = 0; i < j; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, base, &id));
assert(id == base + i);
}
assert(ida_pre_get(&ida, GFP_KERNEL));
err = ida_get_new_above(&ida, base, &id);
assert(err == -ENOSPC);
ida_destroy(&ida);
assert(ida_is_empty(&ida));
rcu_barrier();
}
}
void ida_check_random(void)
{
DEFINE_IDA(ida);
DECLARE_BITMAP(bitmap, 2048);
int id;
unsigned int i;
time_t s = time(NULL);
repeat:
memset(bitmap, 0, sizeof(bitmap));
for (i = 0; i < 100000; i++) {
int r = rand();
int bit = r & 2047;
if (test_bit(bit, bitmap)) {
__clear_bit(bit, bitmap);
ida_remove(&ida, bit);
} else {
__set_bit(bit, bitmap);
ida_pre_get(&ida, GFP_KERNEL);
assert(!ida_get_new_above(&ida, bit, &id));
assert(id == bit);
}
}
ida_destroy(&ida);
if (time(NULL) < s + 10)
goto repeat;
}
void ida_checks(void)
{
DEFINE_IDA(ida);
int id;
unsigned long i;
radix_tree_cpu_dead(1);
ida_check_nomem();
for (i = 0; i < 10000; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
assert(id == i);
}
ida_remove(&ida, 20);
ida_remove(&ida, 21);
for (i = 0; i < 3; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
if (i == 2)
assert(id == 10000);
}
for (i = 0; i < 5000; i++)
ida_remove(&ida, i);
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 5000, &id));
assert(id == 10001);
ida_destroy(&ida);
assert(ida_is_empty(&ida));
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 1, &id));
assert(id == 1);
ida_remove(&ida, id);
assert(ida_is_empty(&ida));
ida_destroy(&ida);
assert(ida_is_empty(&ida));
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 1, &id));
ida_destroy(&ida);
assert(ida_is_empty(&ida));
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 1, &id));
assert(id == 1);
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 1025, &id));
assert(id == 1025);
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 10000, &id));
assert(id == 10000);
ida_remove(&ida, 1025);
ida_destroy(&ida);
assert(ida_is_empty(&ida));
ida_check_leaf();
ida_check_max();
ida_check_conv();
ida_check_random();
radix_tree_cpu_dead(1);
}
int __weak main(void)
{
radix_tree_init();
idr_checks();
ida_checks();
rcu_barrier();
if (nr_allocated)
printf("nr_allocated = %d\n", nr_allocated);
return 0;
}
...@@ -177,7 +177,7 @@ void iteration_test(unsigned order, unsigned test_duration) ...@@ -177,7 +177,7 @@ void iteration_test(unsigned order, unsigned test_duration)
{ {
int i; int i;
printf("Running %siteration tests for %d seconds\n", printv(1, "Running %siteration tests for %d seconds\n",
order > 0 ? "multiorder " : "", test_duration); order > 0 ? "multiorder " : "", test_duration);
max_order = order; max_order = order;
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
#include <unistd.h> #include <unistd.h>
#include <assert.h> #include <assert.h>
#include <linux/mempool.h> #include <linux/gfp.h>
#include <linux/poison.h> #include <linux/poison.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/radix-tree.h> #include <linux/radix-tree.h>
...@@ -13,6 +13,8 @@ ...@@ -13,6 +13,8 @@
int nr_allocated; int nr_allocated;
int preempt_count; int preempt_count;
int kmalloc_verbose;
int test_verbose;
struct kmem_cache { struct kmem_cache {
pthread_mutex_t lock; pthread_mutex_t lock;
...@@ -22,27 +24,6 @@ struct kmem_cache { ...@@ -22,27 +24,6 @@ struct kmem_cache {
void (*ctor)(void *); void (*ctor)(void *);
}; };
void *mempool_alloc(mempool_t *pool, int gfp_mask)
{
return pool->alloc(gfp_mask, pool->data);
}
void mempool_free(void *element, mempool_t *pool)
{
pool->free(element, pool->data);
}
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data)
{
mempool_t *ret = malloc(sizeof(*ret));
ret->alloc = alloc_fn;
ret->free = free_fn;
ret->data = pool_data;
return ret;
}
void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{ {
struct radix_tree_node *node; struct radix_tree_node *node;
...@@ -54,9 +35,9 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) ...@@ -54,9 +35,9 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
if (cachep->nr_objs) { if (cachep->nr_objs) {
cachep->nr_objs--; cachep->nr_objs--;
node = cachep->objs; node = cachep->objs;
cachep->objs = node->private_data; cachep->objs = node->parent;
pthread_mutex_unlock(&cachep->lock); pthread_mutex_unlock(&cachep->lock);
node->private_data = NULL; node->parent = NULL;
} else { } else {
pthread_mutex_unlock(&cachep->lock); pthread_mutex_unlock(&cachep->lock);
node = malloc(cachep->size); node = malloc(cachep->size);
...@@ -65,6 +46,8 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) ...@@ -65,6 +46,8 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
} }
uatomic_inc(&nr_allocated); uatomic_inc(&nr_allocated);
if (kmalloc_verbose)
printf("Allocating %p from slab\n", node);
return node; return node;
} }
...@@ -72,6 +55,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) ...@@ -72,6 +55,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{ {
assert(objp); assert(objp);
uatomic_dec(&nr_allocated); uatomic_dec(&nr_allocated);
if (kmalloc_verbose)
printf("Freeing %p to slab\n", objp);
pthread_mutex_lock(&cachep->lock); pthread_mutex_lock(&cachep->lock);
if (cachep->nr_objs > 10) { if (cachep->nr_objs > 10) {
memset(objp, POISON_FREE, cachep->size); memset(objp, POISON_FREE, cachep->size);
...@@ -79,7 +64,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) ...@@ -79,7 +64,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
} else { } else {
struct radix_tree_node *node = objp; struct radix_tree_node *node = objp;
cachep->nr_objs++; cachep->nr_objs++;
node->private_data = cachep->objs; node->parent = cachep->objs;
cachep->objs = node; cachep->objs = node;
} }
pthread_mutex_unlock(&cachep->lock); pthread_mutex_unlock(&cachep->lock);
...@@ -89,6 +74,8 @@ void *kmalloc(size_t size, gfp_t gfp) ...@@ -89,6 +74,8 @@ void *kmalloc(size_t size, gfp_t gfp)
{ {
void *ret = malloc(size); void *ret = malloc(size);
uatomic_inc(&nr_allocated); uatomic_inc(&nr_allocated);
if (kmalloc_verbose)
printf("Allocating %p from malloc\n", ret);
return ret; return ret;
} }
...@@ -97,6 +84,8 @@ void kfree(void *p) ...@@ -97,6 +84,8 @@ void kfree(void *p)
if (!p) if (!p)
return; return;
uatomic_dec(&nr_allocated); uatomic_dec(&nr_allocated);
if (kmalloc_verbose)
printf("Freeing %p to malloc\n", p);
free(p); free(p);
} }
......
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#include <linux/types.h>
#include <linux/bitops/find.h>
#include <linux/bitops/hweight.h>
#include <linux/kernel.h>
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
/**
* __set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p |= mask;
}
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p &= ~mask;
}
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p ^= mask;
}
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old | mask;
return (old & mask) != 0;
}
/**
* __test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old & ~mask;
return (old & mask) != 0;
}
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr,
volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old ^ mask;
return (old & mask) != 0;
}
/**
* test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
/**
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static inline unsigned long __ffs(unsigned long word)
{
int num = 0;
if ((word & 0xffffffff) == 0) {
num += 32;
word >>= 32;
}
if ((word & 0xffff) == 0) {
num += 16;
word >>= 16;
}
if ((word & 0xff) == 0) {
num += 8;
word >>= 8;
}
if ((word & 0xf) == 0) {
num += 4;
word >>= 4;
}
if ((word & 0x3) == 0) {
num += 2;
word >>= 2;
}
if ((word & 0x1) == 0)
num += 1;
return num;
}
unsigned long find_next_bit(const unsigned long *addr,
unsigned long size,
unsigned long offset);
static inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
#ifndef _ASM_GENERIC_BITOPS___FFS_H_
#define _ASM_GENERIC_BITOPS___FFS_H_
#include <asm/types.h>
/**
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static inline unsigned long __ffs(unsigned long word)
{
int num = 0;
#if BITS_PER_LONG == 64
if ((word & 0xffffffff) == 0) {
num += 32;
word >>= 32;
}
#endif
if ((word & 0xffff) == 0) {
num += 16;
word >>= 16;
}
if ((word & 0xff) == 0) {
num += 8;
word >>= 8;
}
if ((word & 0xf) == 0) {
num += 4;
word >>= 4;
}
if ((word & 0x3) == 0) {
num += 2;
word >>= 2;
}
if ((word & 0x1) == 0)
num += 1;
return num;
}
#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
#ifndef _ASM_GENERIC_BITOPS_FFS_H_
#define _ASM_GENERIC_BITOPS_FFS_H_
/**
* ffs - find first bit set
* @x: the word to search
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
static inline int ffs(int x)
{
int r = 1;
if (!x)
return 0;
if (!(x & 0xffff)) {
x >>= 16;
r += 16;
}
if (!(x & 0xff)) {
x >>= 8;
r += 8;
}
if (!(x & 0xf)) {
x >>= 4;
r += 4;
}
if (!(x & 3)) {
x >>= 2;
r += 2;
}
if (!(x & 1)) {
x >>= 1;
r += 1;
}
return r;
}
#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
#define _ASM_GENERIC_BITOPS_FFZ_H_
/*
* ffz - find first zero in word.
* @word: The word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
#define ffz(x) __ffs(~(x))
#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
#ifndef _ASM_GENERIC_BITOPS_FIND_H_
#define _ASM_GENERIC_BITOPS_FIND_H_
extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
size, unsigned long offset);
extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
long size, unsigned long offset);
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
#ifndef _ASM_GENERIC_BITOPS_FLS_H_
#define _ASM_GENERIC_BITOPS_FLS_H_
/**
* fls - find last (most-significant) bit set
* @x: the word to search
*
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static inline int fls(int x)
{
int r = 32;
if (!x)
return 0;
if (!(x & 0xffff0000u)) {
x <<= 16;
r -= 16;
}
if (!(x & 0xff000000u)) {
x <<= 8;
r -= 8;
}
if (!(x & 0xf0000000u)) {
x <<= 4;
r -= 4;
}
if (!(x & 0xc0000000u)) {
x <<= 2;
r -= 2;
}
if (!(x & 0x80000000u)) {
x <<= 1;
r -= 1;
}
return r;
}
#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
#define _ASM_GENERIC_BITOPS_FLS64_H_
#include <asm/types.h>
static inline int fls64(__u64 x)
{
__u32 h = x >> 32;
if (h)
return fls(h) + 32;
return fls(x);
}
#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
#include <asm/types.h>
extern unsigned int hweight32(unsigned int w);
extern unsigned int hweight16(unsigned int w);
extern unsigned int hweight8(unsigned int w);
extern unsigned long hweight64(__u64 w);
#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
#ifndef _ASM_GENERIC_BITOPS_LE_H_
#define _ASM_GENERIC_BITOPS_LE_H_
#include <asm/types.h>
#include <asm/byteorder.h>
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
#if defined(__LITTLE_ENDIAN)
#define generic_test_le_bit(nr, addr) test_bit(nr, addr)
#define generic___set_le_bit(nr, addr) __set_bit(nr, addr)
#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr)
#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr)
#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr)
#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr)
#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
#elif defined(__BIG_ENDIAN)
#define generic_test_le_bit(nr, addr) \
test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___set_le_bit(nr, addr) \
__set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___clear_le_bit(nr, addr) \
__clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic_test_and_set_le_bit(nr, addr) \
test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic_test_and_clear_le_bit(nr, addr) \
test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___test_and_set_le_bit(nr, addr) \
__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___test_and_clear_le_bit(nr, addr) \
__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
unsigned long size, unsigned long offset);
#else
#error "Please fix <asm/byteorder.h>"
#endif
#define generic_find_first_zero_le_bit(addr, size) \
generic_find_next_zero_le_bit((addr), (size), 0)
#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#include <asm/types.h>
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
/**
* __set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
*p |= mask;
}
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
*p &= ~mask;
}
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
*p ^= mask;
}
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
unsigned long old = *p;
*p = old | mask;
return (old & mask) != 0;
}
/**
* __test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
unsigned long old = *p;
*p = old & ~mask;
return (old & mask) != 0;
}
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr,
volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
unsigned long old = *p;
*p = old ^ mask;
return (old & mask) != 0;
}
/**
* test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
#ifndef _GFP_H #ifndef _GFP_H
#define _GFP_H #define _GFP_H
#include <linux/types.h>
#define __GFP_BITS_SHIFT 26 #define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) #define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
...@@ -17,6 +19,8 @@ ...@@ -17,6 +19,8 @@
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) #define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{ {
......
#include "../../../../include/linux/idr.h"
/* An empty file stub that allows radix-tree.c to compile. */ #define __init
#ifndef _KERNEL_H #ifndef _KERNEL_H
#define _KERNEL_H #define _KERNEL_H
#include <assert.h> #include "../../include/linux/kernel.h"
#include <string.h> #include <string.h>
#include <stdio.h> #include <stdio.h>
#include <stddef.h>
#include <limits.h> #include <limits.h>
#include "../../include/linux/compiler.h" #include <linux/compiler.h>
#include "../../include/linux/err.h" #include <linux/err.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include "../../../include/linux/kconfig.h" #include "../../../include/linux/kconfig.h"
#ifdef BENCHMARK
#define RADIX_TREE_MAP_SHIFT 6
#else
#define RADIX_TREE_MAP_SHIFT 3
#endif
#ifndef NULL
#define NULL 0
#endif
#define BUG_ON(expr) assert(!(expr))
#define WARN_ON(expr) assert(!(expr))
#define __init
#define __must_check
#define panic(expr)
#define printk printf #define printk printf
#define __force
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#define pr_debug printk #define pr_debug printk
#define pr_cont printk
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define cpu_relax() barrier()
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type, member) );})
#define min(a, b) ((a) < (b) ? (a) : (b))
#define cond_resched() sched_yield()
static inline int in_interrupt(void)
{
return 0;
}
/*
* This looks more complex than it should be. But we need to
* get the type for the ~ right in round_down (it needs to be
* as wide as the result!), and we want to evaluate the macro
* arguments just once each.
*/
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#define xchg(ptr, x) uatomic_xchg(ptr, x)
#endif /* _KERNEL_H */ #endif /* _KERNEL_H */
#include <linux/slab.h>
typedef void *(mempool_alloc_t)(int gfp_mask, void *pool_data);
typedef void (mempool_free_t)(void *element, void *pool_data);
typedef struct {
mempool_alloc_t *alloc;
mempool_free_t *free;
void *data;
} mempool_t;
void *mempool_alloc(mempool_t *pool, int gfp_mask);
void mempool_free(void *element, mempool_t *pool);
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data);
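For the test harness these prototypes only need a pass-through pool; something like the following would satisfy them (a sketch under that assumption, not the suite's actual mempool implementation):

	#include <stdlib.h>

	mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
	{
		mempool_t *pool = malloc(sizeof(*pool));

		/* min_nr is ignored: userspace malloc never needs a reserve */
		pool->alloc = alloc_fn;
		pool->free = free_fn;
		pool->data = pool_data;
		return pool;
	}

	void *mempool_alloc(mempool_t *pool, int gfp_mask)
	{
		return pool->alloc(gfp_mask, pool->data);
	}

	void mempool_free(void *element, mempool_t *pool)
	{
		pool->free(element, pool->data);
	}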
+#define DECLARE_PER_CPU(type, val) extern type val
#define DEFINE_PER_CPU(type, val) type val
#define __get_cpu_var(var) var
#define this_cpu_ptr(var) var
+#define this_cpu_read(var) var
+#define this_cpu_xchg(var, val) uatomic_xchg(&var, val)
+#define this_cpu_cmpxchg(var, old, new) uatomic_cmpxchg(&var, old, new)
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
+#ifndef __LINUX_PREEMPT_H
+#define __LINUX_PREEMPT_H
extern int preempt_count;
#define preempt_disable() uatomic_inc(&preempt_count)
#define preempt_enable() uatomic_dec(&preempt_count)
+static inline int in_interrupt(void)
+{
+	return 0;
+}
+#endif /* __LINUX_PREEMPT_H */
+#ifndef _TEST_RADIX_TREE_H
+#define _TEST_RADIX_TREE_H
+#include "generated/map-shift.h"
#include "../../../../include/linux/radix-tree.h"
+extern int kmalloc_verbose;
+extern int test_verbose;
+static inline void trace_call_rcu(struct rcu_head *head,
+		void (*func)(struct rcu_head *head))
+{
+	if (kmalloc_verbose)
+		printf("Delaying free of %p to slab\n", (char *)head -
+				offsetof(struct radix_tree_node, rcu_head));
+	call_rcu(head, func);
+}
+#define printv(verbosity_level, fmt, ...) \
+	if (test_verbose >= verbosity_level) \
+		printf(fmt, ##__VA_ARGS__)
+#undef call_rcu
+#define call_rcu(x, y) trace_call_rcu(x, y)
+#endif /* _TEST_RADIX_TREE_H */
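test_verbose starts at zero and each -v on the test binary's command line increments it (see the getopt change in main.c below), so printv(1, ...) messages need one -v and printv(2, ...) need two:

	printv(1, "running regression test 1\n");		/* shown with -v */
	printv(2, "tagged %ld %p\n", iter.index, *slot);	/* shown with -v -v */

Note that printv expands to a bare if with no do { } while (0) wrapper, so it should not be used as the body of an outer if/else.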
#ifndef _TYPES_H
#define _TYPES_H
#include "../../include/linux/types.h"
#define __rcu
#define __read_mostly
static inline void INIT_LIST_HEAD(struct list_head *list)
{
list->next = list;
list->prev = list;
}
typedef struct {
unsigned int x;
} spinlock_t;
#define uninitialized_var(x) x = x
#include <linux/gfp.h>
#endif
@@ -3,6 +3,7 @@
#include <unistd.h>
#include <time.h>
#include <assert.h>
+#include <limits.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
@@ -67,7 +68,7 @@ void big_gang_check(bool long_run)
	for (i = 0; i < (long_run ? 1000 : 3); i++) {
		__big_gang_check();
-		printf("%d ", i);
+		printv(2, "%d ", i);
		fflush(stdout);
	}
}
@@ -128,14 +129,19 @@ void check_copied_tags(struct radix_tree_root *tree, unsigned long start, unsign
		putchar('.'); */
		if (idx[i] < start || idx[i] > end) {
			if (item_tag_get(tree, idx[i], totag)) {
-				printf("%lu-%lu: %lu, tags %d-%d\n", start, end, idx[i], item_tag_get(tree, idx[i], fromtag), item_tag_get(tree, idx[i], totag));
+				printv(2, "%lu-%lu: %lu, tags %d-%d\n", start,
+					end, idx[i], item_tag_get(tree, idx[i],
+						fromtag),
+					item_tag_get(tree, idx[i], totag));
			}
			assert(!item_tag_get(tree, idx[i], totag));
			continue;
		}
		if (item_tag_get(tree, idx[i], fromtag) ^
			item_tag_get(tree, idx[i], totag)) {
-			printf("%lu-%lu: %lu, tags %d-%d\n", start, end, idx[i], item_tag_get(tree, idx[i], fromtag), item_tag_get(tree, idx[i], totag));
+			printv(2, "%lu-%lu: %lu, tags %d-%d\n", start, end,
+				idx[i], item_tag_get(tree, idx[i], fromtag),
+				item_tag_get(tree, idx[i], totag));
		}
		assert(!(item_tag_get(tree, idx[i], fromtag) ^
			item_tag_get(tree, idx[i], totag)));
@@ -237,7 +243,7 @@ static void __locate_check(struct radix_tree_root *tree, unsigned long index,
	item = item_lookup(tree, index);
	index2 = find_item(tree, item);
	if (index != index2) {
-		printf("index %ld order %d inserted; found %ld\n",
+		printv(2, "index %ld order %d inserted; found %ld\n",
			index, order, index2);
		abort();
	}
@@ -288,43 +294,48 @@ static void single_thread_tests(bool long_run)
{
	int i;
-	printf("starting single_thread_tests: %d allocated, preempt %d\n",
+	printv(1, "starting single_thread_tests: %d allocated, preempt %d\n",
		nr_allocated, preempt_count);
	multiorder_checks();
	rcu_barrier();
-	printf("after multiorder_check: %d allocated, preempt %d\n",
+	printv(2, "after multiorder_check: %d allocated, preempt %d\n",
		nr_allocated, preempt_count);
	locate_check();
	rcu_barrier();
-	printf("after locate_check: %d allocated, preempt %d\n",
+	printv(2, "after locate_check: %d allocated, preempt %d\n",
		nr_allocated, preempt_count);
	tag_check();
	rcu_barrier();
-	printf("after tag_check: %d allocated, preempt %d\n",
+	printv(2, "after tag_check: %d allocated, preempt %d\n",
		nr_allocated, preempt_count);
	gang_check();
	rcu_barrier();
-	printf("after gang_check: %d allocated, preempt %d\n",
+	printv(2, "after gang_check: %d allocated, preempt %d\n",
		nr_allocated, preempt_count);
	add_and_check();
	rcu_barrier();
-	printf("after add_and_check: %d allocated, preempt %d\n",
+	printv(2, "after add_and_check: %d allocated, preempt %d\n",
		nr_allocated, preempt_count);
	dynamic_height_check();
	rcu_barrier();
-	printf("after dynamic_height_check: %d allocated, preempt %d\n",
+	printv(2, "after dynamic_height_check: %d allocated, preempt %d\n",
+		nr_allocated, preempt_count);
+	idr_checks();
+	ida_checks();
+	rcu_barrier();
+	printv(2, "after idr_checks: %d allocated, preempt %d\n",
		nr_allocated, preempt_count);
	big_gang_check(long_run);
	rcu_barrier();
-	printf("after big_gang_check: %d allocated, preempt %d\n",
+	printv(2, "after big_gang_check: %d allocated, preempt %d\n",
		nr_allocated, preempt_count);
	for (i = 0; i < (long_run ? 2000 : 3); i++) {
		copy_tag_check();
-		printf("%d ", i);
+		printv(2, "%d ", i);
		fflush(stdout);
	}
	rcu_barrier();
-	printf("after copy_tag_check: %d allocated, preempt %d\n",
+	printv(2, "after copy_tag_check: %d allocated, preempt %d\n",
		nr_allocated, preempt_count);
}
@@ -334,24 +345,28 @@ int main(int argc, char **argv)
	int opt;
	unsigned int seed = time(NULL);
-	while ((opt = getopt(argc, argv, "ls:")) != -1) {
+	while ((opt = getopt(argc, argv, "ls:v")) != -1) {
		if (opt == 'l')
			long_run = true;
		else if (opt == 's')
			seed = strtoul(optarg, NULL, 0);
+		else if (opt == 'v')
+			test_verbose++;
	}
	printf("random seed %u\n", seed);
	srand(seed);
+	printf("running tests\n");
	rcu_register_thread();
	radix_tree_init();
	regression1_test();
	regression2_test();
	regression3_test();
-	iteration_test(0, 10);
-	iteration_test(7, 20);
+	iteration_test(0, 10 + 90 * long_run);
+	iteration_test(7, 10 + 90 * long_run);
	single_thread_tests(long_run);
	/* Free any remaining preallocated nodes */
@@ -360,9 +375,11 @@ int main(int argc, char **argv)
	benchmark();
	rcu_barrier();
-	printf("after rcu_barrier: %d allocated, preempt %d\n",
+	printv(2, "after rcu_barrier: %d allocated, preempt %d\n",
		nr_allocated, preempt_count);
	rcu_unregister_thread();
+	printf("tests completed\n");
	exit(0);
}
@@ -30,7 +30,7 @@ static void __multiorder_tag_test(int index, int order)
	/* our canonical entry */
	base = index & ~((1 << order) - 1);
-	printf("Multiorder tag test with index %d, canonical entry %d\n",
+	printv(2, "Multiorder tag test with index %d, canonical entry %d\n",
		index, base);
	err = item_insert_order(&tree, index, order);
@@ -150,7 +150,7 @@ static void multiorder_check(unsigned long index, int order)
	struct item *item2 = item_create(min, order);
	RADIX_TREE(tree, GFP_KERNEL);
-	printf("Multiorder index %ld, order %d\n", index, order);
+	printv(2, "Multiorder index %ld, order %d\n", index, order);
	assert(item_insert_order(&tree, index, order) == 0);
@@ -188,7 +188,7 @@ static void multiorder_shrink(unsigned long index, int order)
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_node *node;
-	printf("Multiorder shrink index %ld, order %d\n", index, order);
+	printv(2, "Multiorder shrink index %ld, order %d\n", index, order);
	assert(item_insert_order(&tree, 0, order) == 0);
@@ -209,7 +209,8 @@ static void multiorder_shrink(unsigned long index, int order)
		item_check_absent(&tree, i);
	if (!item_delete(&tree, 0)) {
-		printf("failed to delete index %ld (order %d)\n", index, order); abort();
+		printv(2, "failed to delete index %ld (order %d)\n", index, order);
+		abort();
	}
	for (i = 0; i < 2*max; i++)
@@ -234,7 +235,7 @@ void multiorder_iteration(void)
	void **slot;
	int i, j, err;
-	printf("Multiorder iteration test\n");
+	printv(1, "Multiorder iteration test\n");
#define NUM_ENTRIES 11
	int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
@@ -275,7 +276,7 @@ void multiorder_tagged_iteration(void)
	void **slot;
	int i, j;
-	printf("Multiorder tagged iteration test\n");
+	printv(1, "Multiorder tagged iteration test\n");
#define MT_NUM_ENTRIES 9
	int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
@@ -355,6 +356,10 @@ void multiorder_tagged_iteration(void)
	item_kill_tree(&tree);
}
+/*
+ * Basic join checks: make sure we can't find an entry in the tree after
+ * a larger entry has replaced it
+ */
static void multiorder_join1(unsigned long index,
				unsigned order1, unsigned order2)
{
@@ -373,6 +378,10 @@ static void multiorder_join1(unsigned long index,
	item_kill_tree(&tree);
}
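In miniature, the join semantics that multiorder_join1() exercises (a sketch, not the elided function body): after radix_tree_join() installs a higher-order entry, every index it covers resolves to the new item, so the old entry must be fetched and freed first or it leaks:

	RADIX_TREE(tree, GFP_KERNEL);
	struct item *small = item_create(0, 0);
	struct item *big = item_create(0, 2);	/* order 2: covers indices 0-3 */

	radix_tree_insert(&tree, 0, small);
	free(radix_tree_lookup(&tree, 0));	/* join will overwrite it */
	radix_tree_join(&tree, 0, 2, big);
	assert(radix_tree_lookup(&tree, 3) == big);
	item_kill_tree(&tree);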
+/*
+ * Check that the accounting of exceptional entries is handled correctly
+ * by joining an exceptional entry to a normal pointer.
+ */
static void multiorder_join2(unsigned order1, unsigned order2)
{
	RADIX_TREE(tree, GFP_KERNEL);
@@ -386,6 +395,9 @@ static void multiorder_join2(unsigned order1, unsigned order2)
	assert(item2 == (void *)0x12UL);
	assert(node->exceptional == 1);
+	item2 = radix_tree_lookup(&tree, 0);
+	free(item2);
	radix_tree_join(&tree, 0, order1, item1);
	item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
	assert(item2 == item1);
@@ -453,7 +465,7 @@ static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
{
	struct radix_tree_preload *rtp = &radix_tree_preloads;
	if (rtp->nr != 0)
-		printf("split(%u %u) remaining %u\n", old_order, new_order,
+		printv(2, "split(%u %u) remaining %u\n", old_order, new_order,
			rtp->nr);
	/*
	 * Can't check for equality here as some nodes may have been
@@ -461,7 +473,7 @@ static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
	 * nodes allocated since they should have all been preloaded.
	 */
	if (nr_allocated > alloc)
-		printf("split(%u %u) allocated %u %u\n", old_order, new_order,
+		printv(2, "split(%u %u) allocated %u %u\n", old_order, new_order,
			alloc, nr_allocated);
}
@@ -471,6 +483,7 @@ static void __multiorder_split(int old_order, int new_order)
	void **slot;
	struct radix_tree_iter iter;
	unsigned alloc;
+	struct item *item;
	radix_tree_preload(GFP_KERNEL);
	assert(item_insert_order(&tree, 0, old_order) == 0);
@@ -479,7 +492,7 @@ static void __multiorder_split(int old_order, int new_order)
	/* Wipe out the preloaded cache or it'll confuse check_mem() */
	radix_tree_cpu_dead(0);
-	radix_tree_tag_set(&tree, 0, 2);
+	item = radix_tree_tag_set(&tree, 0, 2);
	radix_tree_split_preload(old_order, new_order, GFP_KERNEL);
	alloc = nr_allocated;
@@ -492,6 +505,7 @@ static void __multiorder_split(int old_order, int new_order)
	radix_tree_preload_end();
	item_kill_tree(&tree);
+	free(item);
}
static void __multiorder_split2(int old_order, int new_order)
@@ -633,3 +647,10 @@ void multiorder_checks(void)
	radix_tree_cpu_dead(0);
}
+int __weak main(void)
+{
+	radix_tree_init();
+	multiorder_checks();
+	return 0;
+}
@@ -193,7 +193,7 @@ void regression1_test(void)
	long arg;
	/* Regression #1 */
-	printf("running regression test 1, should finish in under a minute\n");
+	printv(1, "running regression test 1, should finish in under a minute\n");
	nr_threads = 2;
	pthread_barrier_init(&worker_barrier, NULL, nr_threads);
@@ -216,5 +216,5 @@ void regression1_test(void)
	free(threads);
-	printf("regression test 1, done\n");
+	printv(1, "regression test 1, done\n");
}
@@ -80,7 +80,7 @@ void regression2_test(void)
	unsigned long int start, end;
	struct page *pages[1];
-	printf("running regression test 2 (should take milliseconds)\n");
+	printv(1, "running regression test 2 (should take milliseconds)\n");
	/* 0. */
	for (i = 0; i <= max_slots - 1; i++) {
		p = page_alloc();
@@ -103,7 +103,7 @@ void regression2_test(void)
	/* 4. */
	for (i = max_slots - 1; i >= 0; i--)
-		radix_tree_delete(&mt_tree, i);
+		free(radix_tree_delete(&mt_tree, i));
	/* 5. */
	// NOTE: start should not be 0 because radix_tree_gang_lookup_tag_slot
@@ -114,7 +114,9 @@ void regression2_test(void)
			PAGECACHE_TAG_TOWRITE);
	/* We remove all the remained nodes */
-	radix_tree_delete(&mt_tree, max_slots);
-	printf("regression test 2, done\n");
+	free(radix_tree_delete(&mt_tree, max_slots));
+	BUG_ON(!radix_tree_empty(&mt_tree));
+	printv(1, "regression test 2, done\n");
}
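The free(radix_tree_delete(...)) fixes above lean on radix_tree_delete() returning the removed entry (or NULL if the index was empty), so removal and cleanup compose into a single expression:

	struct page *p = radix_tree_delete(&mt_tree, idx);	/* idx: any index */
	if (p)
		free(p);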
@@ -34,21 +34,21 @@ void regression3_test(void)
	void **slot;
	bool first;
-	printf("running regression test 3 (should take milliseconds)\n");
+	printv(1, "running regression test 3 (should take milliseconds)\n");
	radix_tree_insert(&root, 0, ptr0);
	radix_tree_tag_set(&root, 0, 0);
	first = true;
	radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) {
-		printf("tagged %ld %p\n", iter.index, *slot);
+		printv(2, "tagged %ld %p\n", iter.index, *slot);
		if (first) {
			radix_tree_insert(&root, 1, ptr);
			radix_tree_tag_set(&root, 1, 0);
			first = false;
		}
		if (radix_tree_deref_retry(*slot)) {
-			printf("retry at %ld\n", iter.index);
+			printv(2, "retry at %ld\n", iter.index);
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
@@ -57,13 +57,13 @@ void regression3_test(void)
	first = true;
	radix_tree_for_each_slot(slot, &root, &iter, 0) {
-		printf("slot %ld %p\n", iter.index, *slot);
+		printv(2, "slot %ld %p\n", iter.index, *slot);
		if (first) {
			radix_tree_insert(&root, 1, ptr);
			first = false;
		}
		if (radix_tree_deref_retry(*slot)) {
-			printk("retry at %ld\n", iter.index);
+			printv(2, "retry at %ld\n", iter.index);
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
@@ -72,30 +72,30 @@ void regression3_test(void)
	first = true;
	radix_tree_for_each_contig(slot, &root, &iter, 0) {
-		printk("contig %ld %p\n", iter.index, *slot);
+		printv(2, "contig %ld %p\n", iter.index, *slot);
		if (first) {
			radix_tree_insert(&root, 1, ptr);
			first = false;
		}
		if (radix_tree_deref_retry(*slot)) {
-			printk("retry at %ld\n", iter.index);
+			printv(2, "retry at %ld\n", iter.index);
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
	}
	radix_tree_for_each_slot(slot, &root, &iter, 0) {
-		printf("slot %ld %p\n", iter.index, *slot);
+		printv(2, "slot %ld %p\n", iter.index, *slot);
		if (!iter.index) {
-			printf("next at %ld\n", iter.index);
+			printv(2, "next at %ld\n", iter.index);
			slot = radix_tree_iter_resume(slot, &iter);
		}
	}
	radix_tree_for_each_contig(slot, &root, &iter, 0) {
-		printf("contig %ld %p\n", iter.index, *slot);
+		printv(2, "contig %ld %p\n", iter.index, *slot);
		if (!iter.index) {
-			printf("next at %ld\n", iter.index);
+			printv(2, "next at %ld\n", iter.index);
			slot = radix_tree_iter_resume(slot, &iter);
		}
	}
@@ -103,9 +103,9 @@ void regression3_test(void)
	radix_tree_tag_set(&root, 0, 0);
	radix_tree_tag_set(&root, 1, 0);
	radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) {
-		printf("tagged %ld %p\n", iter.index, *slot);
+		printv(2, "tagged %ld %p\n", iter.index, *slot);
		if (!iter.index) {
-			printf("next at %ld\n", iter.index);
+			printv(2, "next at %ld\n", iter.index);
			slot = radix_tree_iter_resume(slot, &iter);
		}
	}
@@ -113,5 +113,5 @@ void regression3_test(void)
	radix_tree_delete(&root, 0);
	radix_tree_delete(&root, 1);
-	printf("regression test 3 passed\n");
+	printv(1, "regression test 3 passed\n");
}
@@ -49,10 +49,10 @@ void simple_checks(void)
	}
	verify_tag_consistency(&tree, 0);
	verify_tag_consistency(&tree, 1);
-	printf("before item_kill_tree: %d allocated\n", nr_allocated);
+	printv(2, "before item_kill_tree: %d allocated\n", nr_allocated);
	item_kill_tree(&tree);
	rcu_barrier();
-	printf("after item_kill_tree: %d allocated\n", nr_allocated);
+	printv(2, "after item_kill_tree: %d allocated\n", nr_allocated);
}
/*
@@ -257,7 +257,7 @@ static void do_thrash(struct radix_tree_root *tree, char *thrash_state, int tag)
		gang_check(tree, thrash_state, tag);
-		printf("%d(%d) %d(%d) %d(%d) %d(%d) / "
+		printv(2, "%d(%d) %d(%d) %d(%d) %d(%d) / "
			"%d(%d) present, %d(%d) tagged\n",
			insert_chunk, nr_inserted,
			delete_chunk, nr_deleted,
@@ -296,13 +296,13 @@ static void __leak_check(void)
{
	RADIX_TREE(tree, GFP_KERNEL);
-	printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated);
+	printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated);
	item_insert(&tree, 1000000);
-	printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated);
+	printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated);
	item_delete(&tree, 1000000);
-	printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated);
+	printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated);
	item_kill_tree(&tree);
-	printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated);
+	printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated);
}
static void single_check(void)
@@ -336,15 +336,15 @@ void tag_check(void)
	extend_checks();
	contract_checks();
	rcu_barrier();
-	printf("after extend_checks: %d allocated\n", nr_allocated);
+	printv(2, "after extend_checks: %d allocated\n", nr_allocated);
	__leak_check();
	leak_check();
	rcu_barrier();
-	printf("after leak_check: %d allocated\n", nr_allocated);
+	printv(2, "after leak_check: %d allocated\n", nr_allocated);
	simple_checks();
	rcu_barrier();
-	printf("after simple_checks: %d allocated\n", nr_allocated);
+	printv(2, "after simple_checks: %d allocated\n", nr_allocated);
	thrash_tags();
	rcu_barrier();
-	printf("after thrash_tags: %d allocated\n", nr_allocated);
+	printv(2, "after thrash_tags: %d allocated\n", nr_allocated);
}
@@ -29,15 +29,28 @@ int __item_insert(struct radix_tree_root *root, struct item *item)
	return __radix_tree_insert(root, item->index, item->order, item);
}
-int item_insert(struct radix_tree_root *root, unsigned long index)
+struct item *item_create(unsigned long index, unsigned int order)
{
-	return __item_insert(root, item_create(index, 0));
+	struct item *ret = malloc(sizeof(*ret));
+	ret->index = index;
+	ret->order = order;
+	return ret;
}
int item_insert_order(struct radix_tree_root *root, unsigned long index,
			unsigned order)
{
-	return __item_insert(root, item_create(index, order));
+	struct item *item = item_create(index, order);
+	int err = __item_insert(root, item);
+	if (err)
+		free(item);
+	return err;
+}
+int item_insert(struct radix_tree_root *root, unsigned long index)
+{
+	return item_insert_order(root, index, 0);
}
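With this split, a failed insertion no longer leaks the freshly created item. For example, inserting twice at the same index (illustrative only, assuming __radix_tree_insert's usual -EEXIST for an occupied slot):

	RADIX_TREE(tree, GFP_KERNEL);

	assert(item_insert(&tree, 5) == 0);
	assert(item_insert(&tree, 5) == -EEXIST);	/* duplicate: item freed internally */
	item_kill_tree(&tree);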
void item_sanity(struct item *item, unsigned long index)
@@ -61,15 +74,6 @@ int item_delete(struct radix_tree_root *root, unsigned long index)
	return 0;
}
-struct item *item_create(unsigned long index, unsigned int order)
-{
-	struct item *ret = malloc(sizeof(*ret));
-	ret->index = index;
-	ret->order = order;
-	return ret;
-}
void item_check_present(struct radix_tree_root *root, unsigned long index)
{
	struct item *item;
...
@@ -34,6 +34,8 @@ void tag_check(void);
void multiorder_checks(void);
void iteration_test(unsigned order, unsigned duration);
void benchmark(void);
+void idr_checks(void);
+void ida_checks(void);
struct item *
item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);
...
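idr_checks() and ida_checks() live in the new IDR test file, which is not shown in this excerpt. For orientation, a hedged sketch of the shape of the API they exercise:

	void idr_smoke(void)
	{
		DEFINE_IDR(idr);
		void *p = (void *)0x1000;
		int id = idr_alloc(&idr, p, 0, 0, GFP_KERNEL);	/* end == 0: no upper limit */

		assert(id == 0);			/* lowest free ID */
		assert(idr_find(&idr, id) == p);
		assert(idr_remove(&idr, id) == p);	/* idr_remove now returns the entry */
		idr_destroy(&idr);
	}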