Commit 11d66e89 authored by Masanari Iida, committed by Greg Kroah-Hartman

staging: lustre: Fix typo in lustre/lustre/osc

Correct spelling typo in lustre/lustre/osc
Signed-off-by: Masanari Iida <standby24x7@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b64767de
@@ -1703,7 +1703,7 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
 	return is_ready;
 }
 
-/* this is trying to propogate async writeback errors back up to the
+/* this is trying to propagate async writeback errors back up to the
  * application. As an async write fails we record the error code for later if
  * the app does an fsync. As long as errors persist we force future rpcs to be
  * sync so that the app can get a sync error and break the cycle of queueing
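The comment in this hunk describes a self-contained error-handling pattern: record the first asynchronous writeback error, force later rpcs to be synchronous, and surface the saved error when the application calls fsync. A minimal sketch of that pattern follows; the struct and function names are illustrative stand-ins, not the actual Lustre code.

/* Illustrative per-client state (not the real struct client_obd). */
struct client_state {
	int async_rc;   /* first async writeback error seen, 0 if none */
	int force_sync; /* while nonzero, new writes are issued synchronously */
};

/* Async write completion path: remember the error and degrade future
 * writes to sync so the application has a chance to observe a failure. */
static void record_async_error(struct client_state *cs, int rc)
{
	if (rc == 0)
		return;
	if (cs->async_rc == 0)
		cs->async_rc = rc;
	cs->force_sync = 1;
}

/* fsync path: report the saved error to the application; once it has
 * been delivered, allow async writeback again, breaking the cycle. */
static int report_on_fsync(struct client_state *cs)
{
	int rc = cs->async_rc;

	cs->async_rc = 0;
	cs->force_sync = 0;
	return rc; /* e.g. -EIO, or 0 if no error was pending */
}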
@@ -2006,7 +2006,7 @@ static struct osc_object *osc_next_obj(struct client_obd *cli)
 	/* then if we have cache waiters, return all objects with queued
 	 * writes. This is especially important when many small files
 	 * have filled up the cache and not been fired into rpcs because
-	 * they don't pass the nr_pending/object threshhold */
+	 * they don't pass the nr_pending/object threshold */
 	if (!list_empty(&cli->cl_cache_waiters) &&
 	    !list_empty(&cli->cl_loi_write_list))
		return list_to_obj(&cli->cl_loi_write_list, write_item);
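The scheduling rule in this hunk, namely that when tasks are blocked waiting for cache space the client drains objects with queued writes first (since only completed writes return dirty pages to the cache), can be sketched in isolation. The list helpers and field names below are generic stand-ins, not the kernel's.

#include <stddef.h>

/* Minimal circular list, standing in for the kernel's struct list_head. */
struct list_node { struct list_node *next, *prev; };

static int list_is_empty(const struct list_node *h)
{
	return h->next == h;
}

struct client {
	struct list_node cache_waiters; /* tasks blocked on cache space */
	struct list_node write_objs;    /* objects with queued writes   */
	struct list_node read_objs;     /* objects with queued reads    */
};

/* Prefer writers whenever someone is waiting on the cache: flushing
 * writes is what actually frees budget for the waiters. */
static struct list_node *next_work_list(struct client *cli)
{
	if (!list_is_empty(&cli->cache_waiters) &&
	    !list_is_empty(&cli->write_objs))
		return &cli->write_objs;
	if (!list_is_empty(&cli->read_objs))
		return &cli->read_objs;
	return NULL;
}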
@@ -2226,7 +2226,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
 	/* Add this page into extent by the following steps:
 	 * 1. if there exists an active extent for this IO, mostly this page
 	 *    can be added to the active extent and sometimes we need to
-	 *    expand extent to accomodate this page;
+	 *    expand extent to accommodate this page;
 	 * 2. otherwise, a new extent will be allocated. */
 	ext = oio->oi_active;
...
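The two-step placement policy spelled out in the comment above (reuse and possibly expand the active extent, otherwise allocate a new one) reduces to a few lines once locking and rpc concerns are stripped away. This is a hedged sketch with invented types, not the real osc extent code.

#include <stdlib.h>

/* Illustrative extent covering a contiguous page range, inclusive. */
struct extent {
	unsigned long start;
	unsigned long end;
};

static struct extent *add_page(struct extent *active, unsigned long index)
{
	if (active) {
		if (index >= active->start && index <= active->end)
			return active;       /* already covered */
		if (index == active->end + 1) {
			active->end = index; /* expand to accommodate the page */
			return active;
		}
	}
	/* no usable active extent: allocate a fresh one for this page */
	struct extent *ext = malloc(sizeof(*ext));
	if (ext)
		ext->start = ext->end = index;
	return ext;
}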
@@ -299,7 +299,7 @@ struct osc_lock {
 				ols_flush:1,
 	/**
 	 * if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
-	 * the EVAVAIL error as torerable, this will make upper logic happy
+	 * the EVAVAIL error as tolerable, this will make upper logic happy
 	 * to wait all glimpse locks to each OSTs to be completed.
 	 * Glimpse lock converts to normal lock if the server lock is
 	 * granted.
...
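The flag documented in this hunk changes how one enqueue error is interpreted: for a glimpse (attribute-probe) lock the error is tolerable and mapped to success, while a normal lock must propagate it. Below is a sketch of that interpretation only, with a placeholder error code since the real constant is internal to the DLM layer.

#define EXAMPLE_EVAVAIL 11 /* placeholder for the tolerated error code */

struct example_lock {
	unsigned int is_glimpse:1; /* set for glimpse locks */
};

/* Glimpse callers only want an attribute snapshot, so "lock value
 * unavailable" is not a failure for them; everyone else sees the error. */
static int enqueue_result(const struct example_lock *lk, int rc)
{
	if (lk->is_glimpse && rc == -EXAMPLE_EVAVAIL)
		return 0;
	return rc;
}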
@@ -929,7 +929,7 @@ static void osc_lock_build_einfo(const struct lu_env *env,
  * Determine if the lock should be converted into a lockless lock.
  *
  * Steps to check:
- * - if the lock has an explicite requirment for a non-lockless lock;
+ * - if the lock has an explicit requirement for a non-lockless lock;
  * - if the io lock request type ci_lockreq;
  * - send the enqueue rpc to ost to make the further decision;
  * - special treat to truncate lockless lock
...
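The checklist above is a short decision chain: local state can rule lockless handling in or out immediately, and only otherwise is the question deferred to the server through the enqueue rpc. A sketch of the first two steps, using invented names for the request types:

enum lockreq { LOCKREQ_MANDATORY, LOCKREQ_MAYBE, LOCKREQ_NEVER };

struct io_ctx {
	int          needs_real_lock; /* explicit requirement for a non-lockless lock */
	enum lockreq lockreq;         /* caller's request type, like ci_lockreq */
};

/* Returns 1 for lockless, 0 for a real lock, and -1 when the decision
 * must be deferred to the ost via the enqueue rpc. */
static int want_lockless(const struct io_ctx *io)
{
	if (io->needs_real_lock)
		return 0;
	if (io->lockreq == LOCKREQ_NEVER)
		return 1;
	return -1;
}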
@@ -587,7 +587,7 @@ static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
 /* LRU pages are freed in batch mode. OSC should at least free this
  * number of pages to avoid running out of LRU budget, and.. */
 static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
-/* free this number at most otherwise it will take too long time to finsih. */
+/* free this number at most otherwise it will take too long time to finish. */
 static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
 
 /* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
@@ -606,7 +606,7 @@ static int osc_cache_too_much(struct client_obd *cli)
 		return min(pages, lru_shrink_max);
 
 	/* if it's going to run out LRU slots, we should free some, but not
-	 * too much to maintain faireness among OSCs. */
+	 * too much to maintain fairness among OSCs. */
 	if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
 		unsigned long tmp;
...
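Taken together, the two hunks above describe a bounded, fair shrink policy: start giving pages back once this client's free LRU slots drop below 1/16 of the global budget, and clamp each batch between the 2M and 32M constants. A standalone sketch of that arithmetic follows, with a fixed 4 KiB page size standing in for PAGE_CACHE_SHIFT; the batch size (half of what the client holds) is an assumption of this sketch, not taken from the source.

#define PAGE_SHIFT_EXAMPLE 12 /* 4 KiB pages, stand-in for PAGE_CACHE_SHIFT */

static const long shrink_min = 2 << (20 - PAGE_SHIFT_EXAMPLE);  /* 2M  */
static const long shrink_max = 32 << (20 - PAGE_SHIFT_EXAMPLE); /* 32M */

/* lru_left: free LRU slots this client still has; lru_max: global budget;
 * in_use: pages this client currently caches. */
static long shrink_count(long lru_left, long lru_max, long in_use)
{
	long pages;

	if (lru_left >= lru_max >> 4)
		return 0;           /* plenty of slots left, keep caching */
	pages = in_use >> 1;        /* assumed batch: free half our pages */
	if (pages < shrink_min)
		pages = shrink_min; /* free enough to be worth the pass   */
	if (pages > shrink_max)
		pages = shrink_max; /* but keep one pass short to finish  */
	return pages;
}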
@@ -773,7 +773,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
 	osc_pack_capa(req, body, (struct obd_capa *)capa);
 	ptlrpc_request_set_replen(req);
 
-	/* If osc_destory is for destroying the unlink orphan,
+	/* If osc_destroy is for destroying the unlink orphan,
 	 * sent from MDT to OST, which should not be blocked here,
 	 * because the process might be triggered by ptlrpcd, and
 	 * it is not good to block ptlrpcd thread (b=16006)*/
...
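The constraint in this last hunk is a classic rule for daemon threads: a thread that services rpc completions (ptlrpcd here) must never sleep waiting for an rpc of its own, so in that context the destroy is submitted without blocking. A sketch of the pattern with stubbed-out helpers; none of these names are the real Lustre API.

#include <stdio.h>

/* Stubs: whether the caller is a shared rpc daemon thread, and the two
 * submission modes. Real code would inspect the current thread instead. */
static int  current_is_rpc_daemon(void) { return 0; }
static void send_async(const char *req) { printf("queued %s\n", req); }
static void send_and_wait(const char *req) { printf("sent %s and waited\n", req); }

static void submit_destroy(const char *req)
{
	if (current_is_rpc_daemon())
		send_async(req);    /* queue and return; never block the daemon */
	else
		send_and_wait(req); /* ordinary process context may block */
}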