Commit cbe5e610 authored by Lars Ellenberg's avatar Lars Ellenberg Committed by Jens Axboe

lru_cache: introduce lc_get_cumulative()

New helper to be able to consolidate more updates
into a single transaction.
Without this, we can only grab a single refcount
on an updated element while preparing a transaction.

lc_get_cumulative - like lc_get; also finds to-be-changed elements
  @lc: the lru cache to operate on
  @enr: the label to look up

  Unlike lc_get this also returns the element for @enr, if it belongs to
  a pending transaction, so the return values are like for lc_get(),
  plus:

  pointer to an element already on the "to_be_changed" list.
	  In this case, the cache was already marked %LC_DIRTY.

  Caller needs to make sure that the pending transaction is completed,
  before proceeding to actually use this element.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>

Fixed up by Jens to export lc_get_cumulative().
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 779b3fe4
...@@ -256,6 +256,7 @@ extern void lc_destroy(struct lru_cache *lc); ...@@ -256,6 +256,7 @@ extern void lc_destroy(struct lru_cache *lc);
/* Public API of the LRU cache (see lib/lru_cache.c for kernel-doc). */
extern void lc_set(struct lru_cache *lc, unsigned int enr, int index);
extern void lc_del(struct lru_cache *lc, struct lc_element *element);
extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
......
...@@ -365,7 +365,13 @@ static int lc_unused_element_available(struct lru_cache *lc) ...@@ -365,7 +365,13 @@ static int lc_unused_element_available(struct lru_cache *lc)
return 0; return 0;
} }
static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool may_change) /* used as internal flags to __lc_get */
enum {
LC_GET_MAY_CHANGE = 1,
LC_GET_MAY_USE_UNCOMMITTED = 2,
};
static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsigned int flags)
{ {
struct lc_element *e; struct lc_element *e;
...@@ -380,22 +386,31 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool ...@@ -380,22 +386,31 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool
* this enr is currently being pulled in already, * this enr is currently being pulled in already,
* and will be available once the pending transaction * and will be available once the pending transaction
* has been committed. */ * has been committed. */
if (e && e->lc_new_number == e->lc_number) { if (e) {
if (e->lc_new_number != e->lc_number) {
/* It has been found above, but on the "to_be_changed"
* list, not yet committed. Don't pull it in twice,
* wait for the transaction, then try again...
*/
if (!(flags & LC_GET_MAY_USE_UNCOMMITTED))
RETURN(NULL);
/* ... unless the caller is aware of the implications,
* probably preparing a cumulative transaction. */
++e->refcnt;
++lc->hits;
RETURN(e);
}
/* else: lc_new_number == lc_number; a real hit. */
++lc->hits; ++lc->hits;
if (e->refcnt++ == 0) if (e->refcnt++ == 0)
lc->used++; lc->used++;
list_move(&e->list, &lc->in_use); /* Not evictable... */ list_move(&e->list, &lc->in_use); /* Not evictable... */
RETURN(e); RETURN(e);
} }
/* e == NULL */
++lc->misses; ++lc->misses;
if (!may_change) if (!(flags & LC_GET_MAY_CHANGE))
RETURN(NULL);
/* It has been found above, but on the "to_be_changed" list, not yet
* committed. Don't pull it in twice, wait for the transaction, then
* try again */
if (e)
RETURN(NULL); RETURN(NULL);
/* To avoid races with lc_try_lock(), first, mark us dirty /* To avoid races with lc_try_lock(), first, mark us dirty
...@@ -477,7 +492,27 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool ...@@ -477,7 +492,27 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool
*/ */
struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr) struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
{ {
return __lc_get(lc, enr, 1); return __lc_get(lc, enr, LC_GET_MAY_CHANGE);
}
/**
 * lc_get_cumulative - like lc_get; also finds to-be-changed elements
 * @lc: the lru cache to operate on
 * @enr: the label to look up
 *
 * Unlike lc_get this also returns the element for @enr, if it belongs to
 * a pending transaction, so the return values are like for lc_get(),
 * plus:
 *
 * pointer to an element already on the "to_be_changed" list.
 *	In this case, the cache was already marked %LC_DIRTY.
 *
 * Caller needs to make sure that the pending transaction is completed,
 * before proceeding to actually use this element.
 */
struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr)
{
	return __lc_get(lc, enr, LC_GET_MAY_CHANGE | LC_GET_MAY_USE_UNCOMMITTED);
}
/** /**
...@@ -648,3 +683,4 @@ EXPORT_SYMBOL(lc_seq_printf_stats); ...@@ -648,3 +683,4 @@ EXPORT_SYMBOL(lc_seq_printf_stats);
/* Export the public API for use by modules (e.g. DRBD). */
EXPORT_SYMBOL(lc_seq_dump_details);
EXPORT_SYMBOL(lc_try_lock);
EXPORT_SYMBOL(lc_is_used);
EXPORT_SYMBOL(lc_get_cumulative);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment