Commit 30aa9c52 authored by Oleg Drokin, committed by Greg Kroah-Hartman

staging/lustre/osc: Adjust comments to better conform to coding style

This patch fixes "Block comments use a trailing */ on a separate line"
warnings from checkpatch.
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 35f0d1ab
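
The change is mechanical: wherever a multi-line comment ended with the closing */ on the same line as the last words of text, that terminator moves onto a line of its own. As a rough illustration (the declarations below are hypothetical placeholders, not code from the files touched by this commit), checkpatch flags the first form and accepts the second; running scripts/checkpatch.pl -f over a source file is one way to reproduce the quoted warning.

/* old style, flagged by checkpatch: the closing marker shares
 * the final line of comment text */
static int example_old_style;	/* placeholder declaration */

/* new style, accepted by checkpatch: the closing marker gets
 * a line of its own
 */
static int example_new_style;	/* placeholder declaration */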
@@ -69,10 +69,12 @@ struct osc_io {
/** true if this io is lockless. */
int oi_lockless;
/** active extents, we know how many bytes is going to be written,
* so having an active extent will prevent it from being fragmented */
* so having an active extent will prevent it from being fragmented
*/
struct osc_extent *oi_active;
/** partially truncated extent, we need to hold this extent to prevent
* page writeback from happening. */
* page writeback from happening.
*/
struct osc_extent *oi_trunc;
struct obd_info oi_info;
@@ -154,7 +156,8 @@ struct osc_object {
atomic_t oo_nr_writes;
/** Protect extent tree. Will be used to protect
* oo_{read|write}_pages soon. */
* oo_{read|write}_pages soon.
*/
spinlock_t oo_lock;
};
@@ -627,22 +630,26 @@ struct osc_extent {
oe_srvlock:1,
oe_memalloc:1,
/** an ACTIVE extent is going to be truncated, so when this extent
* is released, it will turn into TRUNC state instead of CACHE. */
* is released, it will turn into TRUNC state instead of CACHE.
*/
oe_trunc_pending:1,
/** this extent should be written asap and someone may wait for the
* write to finish. This bit is usually set along with urgent if
* the extent was CACHE state.
* fsync_wait extent can't be merged because new extent region may
* exceed fsync range. */
* exceed fsync range.
*/
oe_fsync_wait:1,
/** covering lock is being canceled */
oe_hp:1,
/** this extent should be written back asap. set if one of pages is
* called by page WB daemon, or sync write or reading requests. */
* called by page WB daemon, or sync write or reading requests.
*/
oe_urgent:1;
/** how many grants allocated for this extent.
* Grant allocated for this extent. There is no grant allocated
* for reading extents and sync write extents. */
* for reading extents and sync write extents.
*/
unsigned int oe_grants;
/** # of dirty pages in this extent */
unsigned int oe_nr_pages;
@@ -655,21 +662,25 @@ struct osc_extent {
struct osc_page *oe_next_page;
/** start and end index of this extent, include start and end
* themselves. Page offset here is the page index of osc_pages.
* oe_start is used as keyword for red-black tree. */
* oe_start is used as keyword for red-black tree.
*/
pgoff_t oe_start;
pgoff_t oe_end;
/** maximum ending index of this extent, this is limited by
* max_pages_per_rpc, lock extent and chunk size. */
* max_pages_per_rpc, lock extent and chunk size.
*/
pgoff_t oe_max_end;
/** waitqueue - for those who want to be notified if this extent's
* state has changed. */
* state has changed.
*/
wait_queue_head_t oe_waitq;
/** lock covering this extent */
struct cl_lock *oe_osclock;
/** terminator of this extent. Must be true if this extent is in IO. */
struct task_struct *oe_owner;
/** return value of writeback. If somebody is waiting for this extent,
* this value can be known by outside world. */
* this value can be known by outside world.
*/
int oe_rc;
/** max pages per rpc when this extent was created */
unsigned int oe_mppr;
@@ -47,11 +47,13 @@ struct lu_env;
enum async_flags {
ASYNC_READY = 0x1, /* ap_make_ready will not be called before this
page is added to an rpc */
* page is added to an rpc
*/
ASYNC_URGENT = 0x2, /* page must be put into an RPC before return */
ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called
to give the caller a chance to update
or cancel the size of the io */
* to give the caller a chance to update
* or cancel the size of the io
*/
ASYNC_HP = 0x10,
};
@@ -272,7 +272,8 @@ static int osc_io_prepare_write(const struct lu_env *env,
/* this page contains `invalid' data, but who cares?
* nobody can access the invalid data.
* in osc_io_commit_write(), we're going to write exact
* [from, to) bytes of this page to OST. -jay */
* [from, to) bytes of this page to OST. -jay
*/
cl_page_export(env, slice->cpl_page, 1);
return result;
@@ -596,7 +597,8 @@ static int osc_io_fsync_start(const struct lu_env *env,
* send OST_SYNC RPC. This is bad because it causes extents
* to be written osc by osc. However, we usually start
* writeback before CL_FSYNC_ALL so this won't have any real
* problem. */
* problem.
*/
rc = osc_cache_wait_range(env, osc, start, end);
if (result == 0)
result = rc;
@@ -154,7 +154,8 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
olck->ols_lock = NULL;
/* wb(); --- for all who checks (ols->ols_lock != NULL) before
* call to osc_lock_detach() */
* call to osc_lock_detach()
*/
dlmlock->l_ast_data = NULL;
olck->ols_handle.cookie = 0ULL;
spin_unlock(&osc_ast_guard);
@@ -169,7 +170,8 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
/* Must get the value under the lock to avoid possible races. */
old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
/* Update the kms. Need to loop all granted locks.
* Not a problem for the client */
* Not a problem for the client
*/
attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
cl_object_attr_set(env, obj, attr, CAT_KMS);
@@ -362,7 +364,8 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
*lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
size = lvb->lvb_size;
/* Extend KMS up to the end of this lock and no further
* A lock on [x,y] means a KMS of up to y + 1 bytes! */
* A lock on [x,y] means a KMS of up to y + 1 bytes!
*/
if (size > dlmlock->l_policy_data.l_extent.end)
size = dlmlock->l_policy_data.l_extent.end + 1;
if (size >= oinfo->loi_kms) {
@@ -426,7 +429,8 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
* to take a semaphore on a parent lock. This is safe, because
* spin-locks are needed to protect consistency of
* dlmlock->l_*_mode and LVB, and we have finished processing
* them. */
* them.
*/
unlock_res_and_lock(dlmlock);
cl_lock_modify(env, lock, descr);
cl_lock_signal(env, lock);
@@ -467,7 +471,8 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
olck->ols_hold = 1;
/* lock reference taken by ldlm_handle2lock_long() is owned by
* osc_lock and released in osc_lock_detach() */
* osc_lock and released in osc_lock_detach()
*/
lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
olck->ols_has_ref = 1;
}
@@ -545,7 +550,8 @@ static int osc_lock_upcall(void *cookie, int errcode)
/* For AGL case, the RPC sponsor may exits the cl_lock
* processing without wait() called before related OSC
* lock upcall(). So update the lock status according
* to the enqueue result inside AGL upcall(). */
* to the enqueue result inside AGL upcall().
*/
if (olck->ols_agl) {
lock->cll_flags |= CLF_FROM_UPCALL;
cl_wait_try(env, lock);
@@ -568,7 +574,8 @@ static int osc_lock_upcall(void *cookie, int errcode)
lu_ref_del(&lock->cll_reference, "upcall", lock);
/* This maybe the last reference, so must be called after
* cl_lock_mutex_put(). */
* cl_lock_mutex_put().
*/
cl_lock_put(env, lock);
cl_env_nested_put(&nest, env);
@@ -854,7 +861,8 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
* BTW, it's okay for cl_lock to be cancelled during
* this period because server can handle this race.
* See ldlm_server_glimpse_ast() for details.
* cl_lock_mutex_get(env, lock); */
* cl_lock_mutex_get(env, lock);
*/
cap = &req->rq_pill;
req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
@@ -1014,7 +1022,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
LASSERT(cl_lock_is_mutexed(lock));
/* make it enqueue anyway for glimpse lock, because we actually
* don't need to cancel any conflicting locks. */
* don't need to cancel any conflicting locks.
*/
if (olck->ols_glimpse)
return 0;
@@ -1048,7 +1057,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
* imagine that client has PR lock on [0, 1000], and thread T0
* is doing lockless IO in [500, 1500] region. Concurrent
* thread T1 can see lockless data in [500, 1000], which is
* wrong, because these data are possibly stale. */
* wrong, because these data are possibly stale.
*/
if (!lockless && osc_lock_compatible(olck, scan_ols))
continue;
@@ -1120,7 +1130,8 @@ static int osc_lock_enqueue(const struct lu_env *env,
struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
/* lock will be passed as upcall cookie,
* hold ref to prevent to be released. */
* hold ref to prevent to be released.
*/
cl_lock_hold_add(env, lock, "upcall", lock);
/* a user for lock also */
cl_lock_user_add(env, lock);
@@ -1171,7 +1182,8 @@ static int osc_lock_wait(const struct lu_env *env,
} else if (olck->ols_agl) {
if (lock->cll_flags & CLF_FROM_UPCALL)
/* It is from enqueue RPC reply upcall for
* updating state. Do not re-enqueue. */
* updating state. Do not re-enqueue.
*/
return -ENAVAIL;
olck->ols_state = OLS_NEW;
} else {
@@ -1232,7 +1244,8 @@ static int osc_lock_use(const struct lu_env *env,
LASSERT(lock->cll_state == CLS_INTRANSIT);
LASSERT(lock->cll_users > 0);
/* set a flag for osc_dlm_blocking_ast0() to signal the
* lock.*/
* lock.
*/
olck->ols_ast_wait = 1;
rc = CLO_WAIT;
}
@@ -1315,7 +1328,8 @@ static void osc_lock_cancel(const struct lu_env *env,
/* Now that we're the only user of dlm read/write reference,
* mostly the ->l_readers + ->l_writers should be zero.
* However, there is a corner case.
* See bug 18829 for details.*/
* See bug 18829 for details.
*/
do_cancel = (dlmlock->l_readers == 0 &&
dlmlock->l_writers == 0);
dlmlock->l_flags |= LDLM_FL_CBPENDING;
@@ -1514,7 +1528,8 @@ static void osc_lock_lockless_state(const struct lu_env *env,
lock->ols_owner = oio;
/* set the io to be lockless if this lock is for io's
* host object */
* host object
*/
if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
oio->oi_lockless = 1;
}
@@ -105,7 +105,8 @@ static void osc_page_transfer_add(const struct lu_env *env,
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
/* ops_lru and ops_inflight share the same field, so take it from LRU
* first and then use it as inflight. */
* first and then use it as inflight.
*/
osc_lru_del(osc_cli(obj), opg, false);
spin_lock(&obj->oo_seatbelt);
@@ -133,7 +134,8 @@ static int osc_page_cache_add(const struct lu_env *env,
/* for sync write, kernel will wait for this page to be flushed before
* osc_io_end() is called, so release it earlier.
* for mkwrite(), it's known there is no further pages. */
* for mkwrite(), it's known there is no further pages.
*/
if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
if (oio->oi_active) {
osc_extent_release(env, oio->oi_active);
@@ -359,7 +361,8 @@ static int osc_page_cancel(const struct lu_env *env,
LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
/* Check if the transferring against this page
* is completed, or not even queued. */
* is completed, or not even queued.
*/
if (opg->ops_transfer_pinned)
/* FIXME: may not be interrupted.. */
rc = osc_cancel_async_page(env, opg);
@@ -423,7 +426,8 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
* creates temporary pages outside of a lock.
*/
/* ops_inflight and ops_lru are the same field, but it doesn't
* hurt to initialize it twice :-) */
* hurt to initialize it twice :-)
*/
INIT_LIST_HEAD(&opg->ops_inflight);
INIT_LIST_HEAD(&opg->ops_lru);
@@ -482,7 +486,8 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
* number of pages to avoid running out of LRU budget, and.. */
* number of pages to avoid running out of LRU budget, and..
*/
static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
/* free this number at most otherwise it will take too long time to finish. */
static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
@@ -491,7 +496,8 @@ static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
* we should free slots aggressively. In this way, slots are freed in a steady
* step to maintain fairness among OSCs.
*
* Return how many LRU pages should be freed. */
* Return how many LRU pages should be freed.
*/
static int osc_cache_too_much(struct client_obd *cli)
{
struct cl_client_cache *cache = cli->cl_cache;
@@ -503,7 +509,8 @@ static int osc_cache_too_much(struct client_obd *cli)
return min(pages, lru_shrink_max);
/* if it's going to run out LRU slots, we should free some, but not
* too much to maintain fairness among OSCs. */
* too much to maintain fairness among OSCs.
*/
if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
unsigned long tmp;
@@ -531,7 +538,8 @@ static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
/* free LRU page only if nobody is using it.
* This check is necessary to avoid freeing the pages
* having already been removed from LRU and pinned
* for IO. */
* for IO.
*/
if (!cl_page_in_use(page)) {
cl_page_unmap(env, io, page);
cl_page_discard(env, io, page);
@@ -621,11 +629,13 @@ int osc_lru_shrink(struct client_obd *cli, int target)
/* move this page to the end of list as it will be discarded
* soon. The page will be finally removed from LRU list in
* osc_page_delete(). */
* osc_page_delete().
*/
list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
/* it's okay to grab a refcount here w/o holding lock because
* it has to grab cl_lru_list_lock to delete the page. */
* it has to grab cl_lru_list_lock to delete the page.
*/
cl_page_get(page);
pvec[index++] = page;
if (++count >= target)
@@ -676,7 +686,8 @@ static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
}
/* delete page from LRUlist. The page can be deleted from LRUlist for two
* reasons: redirtied or deleted from page cache. */
* reasons: redirtied or deleted from page cache.
*/
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
{
if (opg->ops_in_lru) {
@@ -698,7 +709,8 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
* this osc occupies too many LRU pages and kernel is
* stealing one of them.
* cl_lru_shrinkers is to avoid recursive call in case
* we're already in the context of osc_lru_shrink(). */
* we're already in the context of osc_lru_shrink().
*/
if (atomic_read(&cli->cl_lru_shrinkers) == 0 &&
!memory_pressure_get())
osc_lru_shrink(cli, osc_cache_too_much(cli));
@@ -735,7 +747,8 @@ static int osc_lru_reclaim(struct client_obd *cli)
atomic_read(&cli->cl_lru_busy));
/* Reclaim LRU slots from other client_obd as it can't free enough
* from its own. This should rarely happen. */
* from its own. This should rarely happen.
*/
spin_lock(&cache->ccc_lru_lock);
LASSERT(!list_empty(&cache->ccc_lru));
@@ -793,7 +806,8 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
cond_resched();
/* slowest case, all of caching pages are busy, notifying
* other OSCs that we're lack of LRU slots. */
* other OSCs that we're lack of LRU slots.
*/
atomic_inc(&osc_lru_waiters);
gen = atomic_read(&cli->cl_lru_in_list);
@@ -47,10 +47,12 @@ int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
if (oqi) {
/* do not try to access oqi here, it could have been
* freed by osc_quota_setdq() */
* freed by osc_quota_setdq()
*/
/* the slot is busy, the user is about to run out of
* quota space on this OST */
* quota space on this OST
*/
CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n",
type == USRQUOTA ? "user" : "grout", qid[type]);
return NO_QUOTA;
@@ -84,7 +86,8 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
if ((flags & FL_QUOTA_FLAG(type)) != 0) {
/* This ID is getting close to its quota limit, let's
* switch to sync I/O */
* switch to sync I/O
*/
if (oqi)
continue;
@@ -108,7 +111,8 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
qid[type], rc);
} else {
/* This ID is now off the hook, let's remove it from
* the hash table */
* the hash table
*/
if (!oqi)
continue;
@@ -297,8 +301,8 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
ptlrpc_request_set_replen(req);
/* the next poll will find -ENODATA, that means quotacheck is
* going on */
/* the next poll will find -ENODATA, that means quotacheck is going on
*/
cli->cl_qchk_stat = -ENODATA;
rc = ptlrpc_queue_wait(req);
if (rc)