Commit 17891183 authored by Masanari Iida, committed by Greg Kroah-Hartman

staging: lustre: Fix typo in lustre/include part1

Fix typos in comments within lustre/include.
Signed-off-by: Masanari Iida <standby24x7@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d0a0acc3
@@ -1185,7 +1185,7 @@ ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags)
 
 /**
  * Update Lock Value Block Operations (LVBO) on a resource taking into account
- * data from reqest \a r
+ * data from request \a r
  */
 static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
 				       struct ptlrpc_request *r, int increase)
@@ -147,7 +147,7 @@
 #define ldlm_clear_test_lock(_l)	LDLM_CLEAR_FLAG((_l), 1ULL << 19)
 
 /**
- * Immediatelly cancel such locks when they block some other locks. Send
+ * Immediately cancel such locks when they block some other locks. Send
  * cancel notification to original lock holder, but expect no reply. This
  * is for clients (like liblustre) that cannot be expected to reliably
  * response to blocking AST. */
@@ -248,7 +248,7 @@
 
 /**
  * A lock contributes to the known minimum size (KMS) calculation until it
- * has finished the part of its cancelation that performs write back on its
+ * has finished the part of its cancellation that performs write back on its
  * dirty pages. It can remain on the granted list during this whole time.
  * Threads racing to update the KMS after performing their writeback need
  * to know to exclude each other's locks from the calculation as they walk
@@ -192,9 +192,9 @@ struct obd_export {
 	struct obd_import	*exp_imp_reverse;
 	struct nid_stat		*exp_nid_stats;
 	struct lprocfs_stats	*exp_md_stats;
-	/** Active connetion */
+	/** Active connection */
 	struct ptlrpc_connection *exp_connection;
-	/** Connection count value from last succesful reconnect rpc */
+	/** Connection count value from last successful reconnect rpc */
 	__u32			exp_conn_cnt;
 	/** Hash list of all ldlm locks granted on this export */
 	struct cfs_hash		*exp_lock_hash;
@@ -339,7 +339,7 @@ struct lu_client_seq {
 	struct mutex		lcs_mutex;
 
 	/*
-	 * Range of allowed for allocation sequeces. When using lu_client_seq on
+	 * Range of allowed for allocation sequences. When using lu_client_seq on
 	 * clients, this contains meta-sequence range. And for servers this
 	 * contains super-sequence range.
	 */
@@ -398,7 +398,7 @@ struct lu_server_seq {
 	/* LUSTRE_SEQ_SERVER or LUSTRE_SEQ_CONTROLLER */
 	enum lu_mgr_type	lss_type;
 
-	/* Client interafce to request controller */
+	/* Client interface to request controller */
 	struct lu_client_seq	*lss_cli;
 
 	/* Mutex for protecting allocation */
@@ -568,14 +568,14 @@ fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash,
  * finally, when we replace ost_id with FID in data stack.
  *
  * Currently, resid from the old client, whose res[0] = object_id,
- * res[1] = object_seq, is just oposite with Metatdata
+ * res[1] = object_seq, is just opposite with Metatdata
  * resid, where, res[0] = fid->f_seq, res[1] = fid->f_oid.
  * To unifiy the resid identification, we will reverse the data
  * resid to keep it same with Metadata resid, i.e.
  *
  * For resid from the old client,
  *     res[0] = objid, res[1] = 0, still keep the original order,
- *     for compatiblity.
+ *     for compatibility.
  *
  * For new resid
  *     res will be built from normal FID directly, i.e. res[0] = f_seq,
@@ -445,7 +445,7 @@ struct ptlrpc_reply_state {
 	lnet_handle_md_t	rs_md_h;
 	atomic_t		rs_refcount;
 
-	/** Context for the sevice thread */
+	/** Context for the service thread */
 	struct ptlrpc_svc_ctx	*rs_svc_ctx;
 	/** Reply buffer (actually sent to the client), encoded if needed */
 	struct lustre_msg	*rs_repbuf;	/* wrapper */
@@ -497,7 +497,7 @@ struct ptlrpc_request_pool {
 	spinlock_t		prp_lock;
 	/** list of ptlrpc_request structs */
 	struct list_head	prp_req_list;
-	/** Maximum message size that would fit into a rquest from this pool */
+	/** Maximum message size that would fit into a request from this pool */
 	int			prp_rq_size;
 	/** Function to allocate more requests for this pool */
 	void (*prp_populate)(struct ptlrpc_request_pool *, int);
@@ -1351,7 +1351,7 @@ struct nrs_orr_data {
 	 */
 	enum nrs_orr_supp	od_supp;
 	/**
-	 * Round Robin quantum; the maxium number of RPCs that each request
+	 * Round Robin quantum; the maximum number of RPCs that each request
 	 * batch for each object or OST can have in a scheduling round.
 	 */
 	__u16			od_quantum;
@@ -1486,7 +1486,7 @@ struct ptlrpc_nrs_request {
 	 */
 	struct nrs_fifo_req	fifo;
 	/**
-	 * CRR-N request defintion
+	 * CRR-N request definition
 	 */
 	struct nrs_crrn_req	crr;
 	/** ORR and TRR share the same request definition */
@@ -1550,7 +1550,7 @@ struct ptlrpc_request {
 	 * requests in time
 	 */
 	struct list_head	rq_timed_list;
-	/** server-side history, used for debuging purposes. */
+	/** server-side history, used for debugging purposes. */
 	struct list_head	rq_history_list;
 	/** server-side per-export list */
 	struct list_head	rq_exp_list;
@@ -1611,7 +1611,7 @@ struct ptlrpc_request {
 	enum rq_phase		rq_phase;	/* one of RQ_PHASE_* */
 	enum rq_phase		rq_next_phase;	/* one of RQ_PHASE_* to be used next */
 	atomic_t		rq_refcount;	/* client-side refcount for SENT race,
-						   server-side refcounf for multiple replies */
+						   server-side refcount for multiple replies */
 
 	/** Portal to which this request would be sent */
 	short			rq_request_portal; /* XXX FIXME bug 249 */
@@ -1637,7 +1637,7 @@ struct ptlrpc_request {
 	/** xid */
 	__u64			rq_xid;
 	/**
-	 * List item to for replay list. Not yet commited requests get linked
+	 * List item to for replay list. Not yet committed requests get linked
 	 * there.
	 * Also see \a rq_replay comment above.
	 */
@@ -1952,7 +1952,7 @@ void _debug_req(struct ptlrpc_request *req,
 	__attribute__ ((format (printf, 3, 4)));
 
 /**
- * Helper that decides if we need to print request accordig to current debug
+ * Helper that decides if we need to print request according to current debug
  * level settings
  */
 #define debug_req(msgdata, mask, cdls, req, fmt, a...)			      \
@@ -1966,7 +1966,7 @@ do { \
 } while(0)
 
 /**
- * This is the debug print function you need to use to print request sturucture
+ * This is the debug print function you need to use to print request structure
  * content into lustre debug log.
  * for most callers (level is a constant) this is resolved at compile time */
 #define DEBUG_REQ(level, req, fmt, args...)				       \
@@ -35,7 +35,7 @@
  *
  * lustre/include/md_object.h
  *
- * Extention of lu_object.h for metadata objects
+ * Extension of lu_object.h for metadata objects
  */
 
 #ifndef _LUSTRE_MD_OBJECT_H
@@ -158,7 +158,7 @@ struct obd_info {
 	/* statfs data specific for every OSC, if needed at all. */
 	struct obd_statfs	*oi_osfs;
 	/* An update callback which is called to update some data on upper
-	 * level. E.g. it is used for update lsm->lsm_oinfo at every recieved
+	 * level. E.g. it is used for update lsm->lsm_oinfo at every received
 	 * request in osc level for enqueue requests. It is also possible to
 	 * update some caller data from LOV layer if needed. */
 	obd_enqueue_update_f	oi_cb_up;
@@ -1042,8 +1042,8 @@ static inline int it_to_lock_mode(struct lookup_intent *it)
 }
 
 struct md_op_data {
-	struct lu_fid		op_fid1; /* operation fid1 (usualy parent) */
-	struct lu_fid		op_fid2; /* operation fid2 (usualy child) */
+	struct lu_fid		op_fid1; /* operation fid1 (usually parent) */
+	struct lu_fid		op_fid2; /* operation fid2 (usually child) */
 	struct lu_fid		op_fid3; /* 2 extra fids to find conflicting */
 	struct lu_fid		op_fid4; /* to the operation locks. */
 	mdsno_t			op_mds;	 /* what mds server open will go to */