Commit 9d21874d authored by Linus Torvalds

Merge branch 'idr-2018-02-06' of git://git.infradead.org/users/willy/linux-dax

Pull idr updates from Matthew Wilcox:

 - test-suite improvements

 - replace the extended API by improving the normal API

 - performance improvement for IDRs which are 1-based rather than
   0-based

 - add documentation

* 'idr-2018-02-06' of git://git.infradead.org/users/willy/linux-dax:
  idr: Add documentation
  idr: Make 1-based IDRs more efficient
  idr: Warn if old iterators see large IDs
  idr: Rename idr_for_each_entry_ext
  idr: Remove idr_alloc_ext
  cls_u32: Convert to idr_alloc_u32
  cls_u32: Reinstate cyclic allocation
  cls_flower: Convert to idr_alloc_u32
  cls_bpf: Convert to use idr_alloc_u32
  cls_basic: Convert to use idr_alloc_u32
  cls_api: Convert to idr_alloc_u32
  net sched actions: Convert to use idr_alloc_u32
  idr: Add idr_alloc_u32 helper
  idr: Delete idr_find_ext function
  idr: Delete idr_replace_ext function
  idr: Delete idr_remove_ext function
  IDR test suite: Check handling negative end correctly
  idr test suite: Fix ida_test_random()
  radix tree test suite: Remove ARRAY_SIZE
parents 4ed8244e ac665d94

Documentation/core-api/idr.rst (new file):
.. SPDX-License-Identifier: CC-BY-SA-4.0

=============
ID Allocation
=============

:Author: Matthew Wilcox

Overview
========

A common problem to solve is allocating identifiers (IDs); generally
small numbers which identify a thing. Examples include file descriptors,
process IDs, packet identifiers in networking protocols, SCSI tags
and device instance numbers. The IDR and the IDA provide a reasonable
solution to the problem to avoid everybody inventing their own. The IDR
provides the ability to map an ID to a pointer, while the IDA provides
only ID allocation, and as a result is much more memory-efficient.

IDR usage
=========

Start by initialising an IDR, either with :c:func:`DEFINE_IDR`
for statically allocated IDRs or :c:func:`idr_init` for dynamically
allocated IDRs.

You can call :c:func:`idr_alloc` to allocate an unused ID. Look up
the pointer you associated with the ID by calling :c:func:`idr_find`
and free the ID by calling :c:func:`idr_remove`.
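
For example, a minimal sketch of a driver registry (``struct mydev``,
``mydev_idr`` and the surrounding functions are hypothetical, not part
of the IDR API)::

    static DEFINE_IDR(mydev_idr);

    int mydev_register(struct mydev *dev)
    {
            /* Allocate any unused ID >= 0; an @end of 0 means "no upper limit". */
            int id = idr_alloc(&mydev_idr, dev, 0, 0, GFP_KERNEL);

            if (id < 0)
                    return id;      /* -ENOMEM or -ENOSPC */
            dev->id = id;
            return 0;
    }

    struct mydev *mydev_lookup(int id)
    {
            return idr_find(&mydev_idr, id);        /* NULL if @id is unused */
    }

    void mydev_unregister(struct mydev *dev)
    {
            idr_remove(&mydev_idr, dev->id);
    }
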
If you need to change the pointer associated with an ID, you can call
:c:func:`idr_replace`. One common reason to do this is to reserve an
ID by passing a ``NULL`` pointer to the allocation function; initialise the
object with the reserved ID and finally insert the initialised object
into the IDR.
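
That reservation pattern might look like this (a sketch reusing the
hypothetical ``mydev_idr`` above)::

    int id = idr_alloc(&mydev_idr, NULL, 0, 0, GFP_KERNEL);

    if (id < 0)
            return id;
    dev->id = id;                           /* initialise using the reserved ID */
    idr_replace(&mydev_idr, dev, id);       /* publish the finished object */

Until the replace, :c:func:`idr_find` returns ``NULL`` for the reserved
ID, so a half-initialised object is never visible to lookups.
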
Some users need to allocate IDs larger than ``INT_MAX``. So far all of
these users have been content with a ``UINT_MAX`` limit, and they use
:c:func:`idr_alloc_u32`. If you need IDs that will not fit in a u32,
we will work with you to address your needs.
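
On entry, the ID passed by reference holds the lowest acceptable value;
on success it is updated to the allocated ID, and the maximum is
inclusive, matching how the net/sched conversions in this series call
it. A sketch::

    u32 id = 1;             /* lowest acceptable ID */
    int err = idr_alloc_u32(&mydev_idr, dev, &id, UINT_MAX, GFP_KERNEL);

    if (err)
            return err;     /* -ENOMEM or -ENOSPC */
    /* @id now holds the allocated ID, somewhere in [1, UINT_MAX] */
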
If you need to allocate IDs sequentially, you can use
:c:func:`idr_alloc_cyclic`. The IDR becomes less efficient when dealing
with larger IDs, so using this function comes at a slight cost.
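
For example, allocating handles that should not be reused immediately
after being freed (sketch)::

    /* IDs come from [1, 0x800); each search starts above the last ID handed out. */
    int id = idr_alloc_cyclic(&mydev_idr, dev, 1, 0x800, GFP_KERNEL);

    if (id < 0)
            return id;      /* -ENOMEM or -ENOSPC */
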
To perform an action on all pointers used by the IDR, you can
either use the callback-based :c:func:`idr_for_each` or the
iterator-style :c:func:`idr_for_each_entry`. You may need to use
:c:func:`idr_for_each_entry_continue` to continue an iteration. You can
also use :c:func:`idr_get_next` if the iterator doesn't fit your needs.
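
For example, to walk every registered object (sketch)::

    struct mydev *dev;
    int id;

    idr_for_each_entry(&mydev_idr, dev, id)
            pr_info("id %d -> %p\n", id, dev);
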
When you have finished using an IDR, you can call :c:func:`idr_destroy`
to release the memory used by the IDR. This will not free the objects
pointed to from the IDR; if you want to do that, use one of the iterators
to do it.
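
A sketch of tearing everything down with the callback iterator
(``mydev_free`` is hypothetical)::

    static int mydev_free(int id, void *p, void *data)
    {
            kfree(p);       /* release the object the IDR pointed to */
            return 0;
    }

    idr_for_each(&mydev_idr, mydev_free, NULL);
    idr_destroy(&mydev_idr);
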
You can use :c:func:`idr_is_empty` to find out whether there are any
IDs currently allocated.

If you need to take a lock while allocating a new ID from the IDR,
you may need to pass a restrictive set of GFP flags, which can lead
to the IDR being unable to allocate memory. To work around this,
you can call :c:func:`idr_preload` before taking the lock, and then
:c:func:`idr_preload_end` after the allocation.
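
This is the pattern tcf_idr_create() uses after this series (sketch;
``mydev_lock`` is hypothetical)::

    int err;

    idr_preload(GFP_KERNEL);                /* may sleep; fills the per-cpu cache */
    spin_lock_bh(&mydev_lock);
    /* GFP_ATOMIC: we must not sleep while holding the lock. */
    err = idr_alloc_u32(&mydev_idr, dev, &id, UINT_MAX, GFP_ATOMIC);
    spin_unlock_bh(&mydev_lock);
    idr_preload_end();
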
.. kernel-doc:: include/linux/idr.h
   :doc: idr sync

IDA usage
=========

.. kernel-doc:: lib/idr.c
   :doc: IDA description

Functions and structures
========================

.. kernel-doc:: include/linux/idr.h
.. kernel-doc:: lib/idr.c
Documentation/core-api/index.rst:
@@ -16,6 +16,7 @@ Core utilities
atomic_ops
refcount-vs-atomic
cpu_hotplug
idr
local_ops
workqueue
genericirq
Documentation/core-api/kernel-api.rst:
@@ -103,18 +103,6 @@ CRC Functions
.. kernel-doc:: lib/crc-itu-t.c
:export:
idr/ida Functions
-----------------
.. kernel-doc:: include/linux/idr.h
:doc: idr sync
.. kernel-doc:: lib/idr.c
:doc: IDA description
.. kernel-doc:: lib/idr.c
:export:
Math Functions in Linux
=======================
include/linux/idr.h:
@@ -15,10 +15,10 @@
#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/bug.h>
struct idr {
struct radix_tree_root idr_rt;
unsigned int idr_base;
unsigned int idr_next;
};
@@ -31,10 +31,26 @@ struct idr {
/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT))
#define IDR_INIT \
{ \
.idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \
#define IDR_INIT_BASE(base) { \
.idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER), \
.idr_base = (base), \
.idr_next = 0, \
}
/**
* IDR_INIT() - Initialise an IDR.
*
* A freshly-initialised IDR contains no IDs.
*/
#define IDR_INIT IDR_INIT_BASE(0)
/**
* DEFINE_IDR() - Define a statically-allocated IDR
* @name: Name of IDR
*
* An IDR defined using this macro is ready for use with no additional
* initialisation required. It contains no IDs.
*/
#define DEFINE_IDR(name) struct idr name = IDR_INIT
/**
@@ -82,80 +98,52 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
void idr_preload(gfp_t gfp_mask);
int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index,
unsigned long start, unsigned long end, gfp_t gfp,
bool ext);
/**
* idr_alloc - allocate an id
* @idr: idr handle
* @ptr: pointer to be associated with the new id
* @start: the minimum id (inclusive)
* @end: the maximum id (exclusive)
* @gfp: memory allocation flags
*
* Allocates an unused ID in the range [start, end). Returns -ENOSPC
* if there are no unused IDs in that range.
*
* Note that @end is treated as max when <= 0. This is to always allow
* using @start + N as @end as long as N is inside integer range.
*
* Simultaneous modifications to the @idr are not allowed and should be
* prevented by the user, usually with a lock. idr_alloc() may be called
* concurrently with read-only accesses to the @idr, such as idr_find() and
* idr_for_each_entry().
*/
static inline int idr_alloc(struct idr *idr, void *ptr,
int start, int end, gfp_t gfp)
{
unsigned long id;
int ret;
if (WARN_ON_ONCE(start < 0))
return -EINVAL;
ret = idr_alloc_cmn(idr, ptr, &id, start, end, gfp, false);
if (ret)
return ret;
return id;
}
static inline int idr_alloc_ext(struct idr *idr, void *ptr,
unsigned long *index,
unsigned long start,
unsigned long end,
gfp_t gfp)
{
return idr_alloc_cmn(idr, ptr, index, start, end, gfp, true);
}
int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
unsigned long max, gfp_t);
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
void *idr_remove(struct idr *, unsigned long id);
void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ext(struct idr *idr, unsigned long *nextid);
void *idr_replace(struct idr *, void *, int id);
void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
static inline void *idr_remove_ext(struct idr *idr, unsigned long id)
{
return radix_tree_delete_item(&idr->idr_rt, id, NULL);
}
static inline void *idr_remove(struct idr *idr, int id)
/**
* idr_init_base() - Initialise an IDR.
* @idr: IDR handle.
* @base: The base value for the IDR.
*
* This variation of idr_init() creates an IDR which will allocate IDs
* starting at %base.
*/
static inline void idr_init_base(struct idr *idr, int base)
{
return idr_remove_ext(idr, id);
INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
idr->idr_base = base;
idr->idr_next = 0;
}
/**
* idr_init() - Initialise an IDR.
* @idr: IDR handle.
*
* Initialise a dynamically allocated IDR. To initialise a
* statically allocated IDR, use DEFINE_IDR().
*/
static inline void idr_init(struct idr *idr)
{
INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
idr->idr_next = 0;
idr_init_base(idr, 0);
}
/**
* idr_is_empty() - Are there any IDs allocated?
* @idr: IDR handle.
*
* Return: %true if no IDs are currently allocated in this IDR.
*/
static inline bool idr_is_empty(const struct idr *idr)
{
return radix_tree_empty(&idr->idr_rt) &&
@@ -174,50 +162,38 @@ static inline void idr_preload_end(void)
}
/**
* idr_find - return pointer for given id
* @idr: idr handle
* @id: lookup key
*
* Return the pointer given the id it has been registered with. A %NULL
* return indicates that @id is not valid or you passed %NULL in
* idr_get_new().
* idr_for_each_entry() - Iterate over an IDR's elements of a given type.
* @idr: IDR handle.
* @entry: The type * to use as cursor
* @id: Entry ID.
*
* This function can be called under rcu_read_lock(), given that the leaf
* pointers lifetimes are correctly managed.
* @entry and @id do not need to be initialized before the loop, and
* after normal termination @entry is left with the value NULL. This
* is convenient for a "not found" value.
*/
static inline void *idr_find_ext(const struct idr *idr, unsigned long id)
{
return radix_tree_lookup(&idr->idr_rt, id);
}
static inline void *idr_find(const struct idr *idr, int id)
{
return idr_find_ext(idr, id);
}
#define idr_for_each_entry(idr, entry, id) \
for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
/**
* idr_for_each_entry - iterate over an idr's elements of a given type
* @idr: idr handle
* @entry: the type * to use as cursor
* @id: id entry's key
* idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
* @idr: IDR handle.
* @entry: The type * to use as cursor.
* @id: Entry ID.
*
* @entry and @id do not need to be initialized before the loop, and
* after normal terminatinon @entry is left with the value NULL. This
* after normal termination @entry is left with the value NULL. This
* is convenient for a "not found" value.
*/
#define idr_for_each_entry(idr, entry, id) \
for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
#define idr_for_each_entry_ext(idr, entry, id) \
for (id = 0; ((entry) = idr_get_next_ext(idr, &(id))) != NULL; ++id)
#define idr_for_each_entry_ul(idr, entry, id) \
for (id = 0; ((entry) = idr_get_next_ul(idr, &(id))) != NULL; ++id)
/**
* idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
* @idr: idr handle
* @entry: the type * to use as cursor
* @id: id entry's key
* idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
* @idr: IDR handle.
* @entry: The type * to use as a cursor.
* @id: Entry ID.
*
* Continue to iterate over list of given type, continuing after
* the current position.
* Continue to iterate over entries, continuing after the current position.
*/
#define idr_for_each_entry_continue(idr, entry, id) \
for ((entry) = idr_get_next((idr), &(id)); \
include/linux/radix-tree.h:
@@ -356,24 +356,9 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index,
int radix_tree_join(struct radix_tree_root *, unsigned long index,
unsigned new_order, void *);
void __rcu **idr_get_free_cmn(struct radix_tree_root *root,
void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max);
static inline void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter,
gfp_t gfp,
int end)
{
return idr_get_free_cmn(root, iter, gfp, end > 0 ? end - 1 : INT_MAX);
}
static inline void __rcu **idr_get_free_ext(struct radix_tree_root *root,
struct radix_tree_iter *iter,
gfp_t gfp,
unsigned long end)
{
return idr_get_free_cmn(root, iter, gfp, end - 1);
}
enum {
RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */
This diff is collapsed. (lib/idr.c)

lib/radix-tree.c:
@@ -24,6 +24,7 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
@@ -2135,7 +2136,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
}
EXPORT_SYMBOL(ida_pre_get);
void __rcu **idr_get_free_cmn(struct radix_tree_root *root,
void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max)
{
net/sched/act_api.c:
@@ -78,7 +78,7 @@ static void free_tcf(struct tc_action *p)
static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
{
spin_lock_bh(&idrinfo->lock);
idr_remove_ext(&idrinfo->action_idr, p->tcfa_index);
idr_remove(&idrinfo->action_idr, p->tcfa_index);
spin_unlock_bh(&idrinfo->lock);
gen_kill_estimator(&p->tcfa_rate_est);
free_tcf(p);
@@ -124,7 +124,7 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
s_i = cb->args[0];
idr_for_each_entry_ext(idr, p, id) {
idr_for_each_entry_ul(idr, p, id) {
index++;
if (index < s_i)
continue;
@@ -181,7 +181,7 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
if (nla_put_string(skb, TCA_KIND, ops->kind))
goto nla_put_failure;
idr_for_each_entry_ext(idr, p, id) {
idr_for_each_entry_ul(idr, p, id) {
ret = __tcf_idr_release(p, false, true);
if (ret == ACT_P_DELETED) {
module_put(ops->owner);
@@ -222,7 +222,7 @@ static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
struct tc_action *p = NULL;
spin_lock_bh(&idrinfo->lock);
p = idr_find_ext(&idrinfo->action_idr, index);
p = idr_find(&idrinfo->action_idr, index);
spin_unlock_bh(&idrinfo->lock);
return p;
@@ -274,7 +274,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
struct tcf_idrinfo *idrinfo = tn->idrinfo;
struct idr *idr = &idrinfo->action_idr;
int err = -ENOMEM;
unsigned long idr_index;
if (unlikely(!p))
return -ENOMEM;
@@ -284,45 +283,28 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
if (cpustats) {
p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
if (!p->cpu_bstats) {
err1:
kfree(p);
return err;
}
p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
if (!p->cpu_qstats) {
err2:
free_percpu(p->cpu_bstats);
if (!p->cpu_bstats)
goto err1;
}
p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
if (!p->cpu_qstats)
goto err2;
}
spin_lock_init(&p->tcfa_lock);
idr_preload(GFP_KERNEL);
spin_lock_bh(&idrinfo->lock);
/* user doesn't specify an index */
if (!index) {
idr_preload(GFP_KERNEL);
spin_lock_bh(&idrinfo->lock);
err = idr_alloc_ext(idr, NULL, &idr_index, 1, 0,
GFP_ATOMIC);
spin_unlock_bh(&idrinfo->lock);
idr_preload_end();
if (err) {
err3:
free_percpu(p->cpu_qstats);
goto err2;
}
p->tcfa_index = idr_index;
index = 1;
err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC);
} else {
idr_preload(GFP_KERNEL);
spin_lock_bh(&idrinfo->lock);
err = idr_alloc_ext(idr, NULL, NULL, index, index + 1,
GFP_ATOMIC);
spin_unlock_bh(&idrinfo->lock);
idr_preload_end();
if (err)
goto err3;
p->tcfa_index = index;
err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
}
spin_unlock_bh(&idrinfo->lock);
idr_preload_end();
if (err)
goto err3;
p->tcfa_index = index;
p->tcfa_tm.install = jiffies;
p->tcfa_tm.lastuse = jiffies;
p->tcfa_tm.firstuse = 0;
@@ -330,9 +312,8 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
&p->tcfa_rate_est,
&p->tcfa_lock, NULL, est);
if (err) {
goto err3;
}
if (err)
goto err4;
}
p->idrinfo = idrinfo;
@@ -340,6 +321,15 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
INIT_LIST_HEAD(&p->list);
*a = p;
return 0;
err4:
idr_remove(idr, index);
err3:
free_percpu(p->cpu_qstats);
err2:
free_percpu(p->cpu_bstats);
err1:
kfree(p);
return err;
}
EXPORT_SYMBOL(tcf_idr_create);
@@ -348,7 +338,7 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
struct tcf_idrinfo *idrinfo = tn->idrinfo;
spin_lock_bh(&idrinfo->lock);
idr_replace_ext(&idrinfo->action_idr, a, a->tcfa_index);
idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
spin_unlock_bh(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_insert);
@@ -361,7 +351,7 @@ void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
int ret;
unsigned long id = 1;
idr_for_each_entry_ext(idr, p, id) {
idr_for_each_entry_ul(idr, p, id) {
ret = __tcf_idr_release(p, false, true);
if (ret == ACT_P_DELETED)
module_put(ops->owner);
net/sched/cls_api.c:
@@ -381,8 +381,8 @@ static int tcf_block_insert(struct tcf_block *block, struct net *net,
struct tcf_net *tn = net_generic(net, tcf_net_id);
int err;
err = idr_alloc_ext(&tn->idr, block, NULL, block_index,
block_index + 1, GFP_KERNEL);
err = idr_alloc_u32(&tn->idr, block, &block_index, block_index,
GFP_KERNEL);
if (err)
return err;
block->index = block_index;
@@ -393,7 +393,7 @@ static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
struct tcf_net *tn = net_generic(net, tcf_net_id);
idr_remove_ext(&tn->idr, block->index);
idr_remove(&tn->idr, block->index);
}
static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
@@ -434,7 +434,7 @@ static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
struct tcf_net *tn = net_generic(net, tcf_net_id);
return idr_find_ext(&tn->idr, block_index);
return idr_find(&tn->idr, block_index);
}
static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block)
net/sched/cls_basic.c:
@@ -120,7 +120,7 @@ static void basic_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
list_for_each_entry_safe(f, n, &head->flist, link) {
list_del_rcu(&f->link);
tcf_unbind_filter(tp, &f->res);
idr_remove_ext(&head->handle_idr, f->handle);
idr_remove(&head->handle_idr, f->handle);
if (tcf_exts_get_net(&f->exts))
call_rcu(&f->rcu, basic_delete_filter);
else
@@ -138,7 +138,7 @@ static int basic_delete(struct tcf_proto *tp, void *arg, bool *last,
list_del_rcu(&f->link);
tcf_unbind_filter(tp, &f->res);
idr_remove_ext(&head->handle_idr, f->handle);
idr_remove(&head->handle_idr, f->handle);
tcf_exts_get_net(&f->exts);
call_rcu(&f->rcu, basic_delete_filter);
*last = list_empty(&head->flist);
@@ -185,7 +185,6 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
struct nlattr *tb[TCA_BASIC_MAX + 1];
struct basic_filter *fold = (struct basic_filter *) *arg;
struct basic_filter *fnew;
unsigned long idr_index;
if (tca[TCA_OPTIONS] == NULL)
return -EINVAL;
@@ -208,34 +207,30 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
goto errout;
if (handle) {
fnew->handle = handle;
if (!fold) {
err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
handle, handle + 1, GFP_KERNEL);
if (err)
goto errout;
}
} else {
err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
1, 0x7FFFFFFF, GFP_KERNEL);
if (err)
goto errout;
fnew->handle = idr_index;
if (!handle) {
handle = 1;
err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
INT_MAX, GFP_KERNEL);
} else if (!fold) {
err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
handle, GFP_KERNEL);
}
if (err)
goto errout;
fnew->handle = handle;
err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr,
extack);
if (err < 0) {
if (!fold)
idr_remove_ext(&head->handle_idr, fnew->handle);
idr_remove(&head->handle_idr, fnew->handle);
goto errout;
}
*arg = fnew;
if (fold) {
idr_replace_ext(&head->handle_idr, fnew, fnew->handle);
idr_replace(&head->handle_idr, fnew, fnew->handle);
list_replace_rcu(&fold->link, &fnew->link);
tcf_unbind_filter(tp, &fold->res);
tcf_exts_get_net(&fold->exts);
net/sched/cls_bpf.c:
@@ -295,7 +295,7 @@ static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
{
struct cls_bpf_head *head = rtnl_dereference(tp->root);
idr_remove_ext(&head->handle_idr, prog->handle);
idr_remove(&head->handle_idr, prog->handle);
cls_bpf_stop_offload(tp, prog, extack);
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
@@ -471,7 +471,6 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
struct cls_bpf_prog *oldprog = *arg;
struct nlattr *tb[TCA_BPF_MAX + 1];
struct cls_bpf_prog *prog;
unsigned long idr_index;
int ret;
if (tca[TCA_OPTIONS] == NULL)
@@ -498,21 +497,18 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
}
if (handle == 0) {
ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
1, 0x7FFFFFFF, GFP_KERNEL);
if (ret)
goto errout;
prog->handle = idr_index;
} else {
if (!oldprog) {
ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
handle, handle + 1, GFP_KERNEL);
if (ret)
goto errout;
}
prog->handle = handle;
handle = 1;
ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
INT_MAX, GFP_KERNEL);
} else if (!oldprog) {
ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
handle, GFP_KERNEL);
}
if (ret)
goto errout;
prog->handle = handle;
ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
extack);
if (ret < 0)
@@ -526,7 +522,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
if (oldprog) {
idr_replace_ext(&head->handle_idr, prog, handle);
idr_replace(&head->handle_idr, prog, handle);
list_replace_rcu(&oldprog->link, &prog->link);
tcf_unbind_filter(tp, &oldprog->res);
tcf_exts_get_net(&oldprog->exts);
@@ -542,7 +538,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
cls_bpf_free_parms(prog);
errout_idr:
if (!oldprog)
idr_remove_ext(&head->handle_idr, prog->handle);
idr_remove(&head->handle_idr, prog->handle);
errout:
tcf_exts_destroy(&prog->exts);
kfree(prog);
net/sched/cls_flower.c:
@@ -288,7 +288,7 @@ static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
idr_remove_ext(&head->handle_idr, f->handle);
idr_remove(&head->handle_idr, f->handle);
list_del_rcu(&f->list);
if (!tc_skip_hw(f->flags))
fl_hw_destroy_filter(tp, f, extack);
@@ -334,7 +334,7 @@ static void *fl_get(struct tcf_proto *tp, u32 handle)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
return idr_find_ext(&head->handle_idr, handle);
return idr_find(&head->handle_idr, handle);
}
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
@@ -865,7 +865,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
struct cls_fl_filter *fnew;
struct nlattr **tb;
struct fl_flow_mask mask = {};
unsigned long idr_index;
int err;
if (!tca[TCA_OPTIONS])
@@ -896,21 +895,17 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
goto errout;
if (!handle) {
err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
1, 0x80000000, GFP_KERNEL);
if (err)
goto errout;
fnew->handle = idr_index;
}
/* user specifies a handle and it doesn't exist */
if (handle && !fold) {
err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
handle, handle + 1, GFP_KERNEL);
if (err)
goto errout;
fnew->handle = idr_index;
handle = 1;
err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
INT_MAX, GFP_KERNEL);
} else if (!fold) {
/* user specifies a handle and it doesn't exist */
err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
handle, GFP_KERNEL);
}
if (err)
goto errout;
fnew->handle = handle;
if (tb[TCA_FLOWER_FLAGS]) {
fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
@@ -966,8 +961,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
*arg = fnew;
if (fold) {
fnew->handle = handle;
idr_replace_ext(&head->handle_idr, fnew, fnew->handle);
idr_replace(&head->handle_idr, fnew, fnew->handle);
list_replace_rcu(&fold->list, &fnew->list);
tcf_unbind_filter(tp, &fold->res);
tcf_exts_get_net(&fold->exts);
@@ -981,7 +975,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
errout_idr:
if (fnew->handle)
idr_remove_ext(&head->handle_idr, fnew->handle);
idr_remove(&head->handle_idr, fnew->handle);
errout:
tcf_exts_destroy(&fnew->exts);
kfree(fnew);
net/sched/cls_u32.c:
@@ -316,19 +316,13 @@ static void *u32_get(struct tcf_proto *tp, u32 handle)
return u32_lookup_key(ht, handle);
}
/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
unsigned long idr_index;
int err;
/* This is only used inside rtnl lock it is safe to increment
* without read _copy_ update semantics
*/
err = idr_alloc_ext(&tp_c->handle_idr, ptr, &idr_index,
1, 0x7FF, GFP_KERNEL);
if (err)
int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
if (id < 0)
return 0;
return (u32)(idr_index | 0x800) << 20;
return (id | 0x800U) << 20;
}
static struct hlist_head *tc_u_common_hash;
@@ -598,7 +592,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
rtnl_dereference(n->next));
tcf_unbind_filter(tp, &n->res);
u32_remove_hw_knode(tp, n, extack);
idr_remove_ext(&ht->handle_idr, n->handle);
idr_remove(&ht->handle_idr, n->handle);
if (tcf_exts_get_net(&n->exts))
call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
else
@@ -625,7 +619,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
if (phn == ht) {
u32_clear_hw_hnode(tp, ht, extack);
idr_destroy(&ht->handle_idr);
idr_remove_ext(&tp_c->handle_idr, ht->handle);
idr_remove(&tp_c->handle_idr, ht->handle);
RCU_INIT_POINTER(*hn, ht->next);
kfree_rcu(ht, rcu);
return 0;
@@ -747,19 +741,17 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
unsigned long idr_index;
u32 start = htid | 0x800;
u32 index = htid | 0x800;
u32 max = htid | 0xFFF;
u32 min = htid;
if (idr_alloc_ext(&ht->handle_idr, NULL, &idr_index,
start, max + 1, GFP_KERNEL)) {
if (idr_alloc_ext(&ht->handle_idr, NULL, &idr_index,
min + 1, max + 1, GFP_KERNEL))
return max;
if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
index = htid + 1;
if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
GFP_KERNEL))
index = max;
}
return (u32)idr_index;
return index;
}
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
@@ -849,7 +841,7 @@ static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
if (pins->handle == n->handle)
break;
idr_replace_ext(&ht->handle_idr, n, n->handle);
idr_replace(&ht->handle_idr, n, n->handle);
RCU_INIT_POINTER(n->next, pins->next);
rcu_assign_pointer(*ins, n);
}
@@ -1010,8 +1002,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return -ENOMEM;
}
} else {
err = idr_alloc_ext(&tp_c->handle_idr, ht, NULL,
handle, handle + 1, GFP_KERNEL);
err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
handle, GFP_KERNEL);
if (err) {
kfree(ht);
return err;
@@ -1027,7 +1019,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
err = u32_replace_hw_hnode(tp, ht, flags, extack);
if (err) {
idr_remove_ext(&tp_c->handle_idr, handle);
idr_remove(&tp_c->handle_idr, handle);
kfree(ht);
return err;
}
@@ -1067,8 +1059,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return -EINVAL;
}
handle = htid | TC_U32_NODE(handle);
err = idr_alloc_ext(&ht->handle_idr, NULL, NULL,
handle, handle + 1,
err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
GFP_KERNEL);
if (err)
return err;
@@ -1163,7 +1154,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
#endif
kfree(n);
erridr:
idr_remove_ext(&ht->handle_idr, handle);
idr_remove(&ht->handle_idr, handle);
return err;
}
tools/testing/radix-tree/idr-test.c:
@@ -153,11 +153,12 @@ void idr_nowait_test(void)
idr_destroy(&idr);
}
void idr_get_next_test(void)
void idr_get_next_test(int base)
{
unsigned long i;
int nextid;
DEFINE_IDR(idr);
idr_init_base(&idr, base);
int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};
@@ -207,6 +208,7 @@ void idr_checks(void)
assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
}
assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC);
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
@@ -214,6 +216,23 @@ void idr_checks(void)
assert(idr_is_empty(&idr));
idr_set_cursor(&idr, INT_MAX - 3UL);
for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
struct item *item;
unsigned int id;
if (i <= INT_MAX)
item = item_create(i, 0);
else
item = item_create(i - INT_MAX - 1, 0);
id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
assert(id == item->index);
}
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
assert(idr_is_empty(&idr));
for (i = 1; i < 10000; i++) {
struct item *item = item_create(i, 0);
assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
@@ -226,7 +245,9 @@ void idr_checks(void)
idr_alloc_test();
idr_null_test();
idr_nowait_test();
idr_get_next_test();
idr_get_next_test(0);
idr_get_next_test(1);
idr_get_next_test(4);
}
/*
......@@ -380,7 +401,7 @@ void ida_check_random(void)
do {
ida_pre_get(&ida, GFP_KERNEL);
err = ida_get_new_above(&ida, bit, &id);
} while (err == -ENOMEM);
} while (err == -EAGAIN);
assert(!err);
assert(id == bit);
}
@@ -489,7 +510,7 @@ static void *ida_random_fn(void *arg)
void ida_thread_tests(void)
{
pthread_t threads[10];
pthread_t threads[20];
int i;
for (i = 0; i < ARRAY_SIZE(threads); i++)
tools/testing/radix-tree/linux/kernel.h:
@@ -17,6 +17,4 @@
#define pr_debug printk
#define pr_cont printk
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif /* _KERNEL_H */