Commit 97cba130 authored by Dmitry Eremin's avatar Dmitry Eremin Committed by Greg Kroah-Hartman

staging: lustre: fix comparison between signed and unsigned

Cleanup in general headers.
* use size_t in cfs_size_round*()
* make unsigned index and len in lustre_cfg_*()
* make iteration variable the same type as comparing value
* make unsigned pages counters
Signed-off-by: Dmitry Eremin <dmitry.eremin@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5417
Reviewed-on: http://review.whamcloud.com/11327
Reviewed-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Fan Yong <fan.yong@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 058a6a42
...@@ -310,13 +310,13 @@ do { \ ...@@ -310,13 +310,13 @@ do { \
#define MKSTR(ptr) ((ptr)) ? (ptr) : "" #define MKSTR(ptr) ((ptr)) ? (ptr) : ""
static inline int cfs_size_round4(int val) static inline size_t cfs_size_round4(int val)
{ {
return (val + 3) & (~0x3); return (val + 3) & (~0x3);
} }
#ifndef HAVE_CFS_SIZE_ROUND #ifndef HAVE_CFS_SIZE_ROUND
static inline int cfs_size_round(int val) static inline size_t cfs_size_round(int val)
{ {
return (val + 7) & (~0x7); return (val + 7) & (~0x7);
} }
...@@ -324,17 +324,17 @@ static inline int cfs_size_round(int val) ...@@ -324,17 +324,17 @@ static inline int cfs_size_round(int val)
#define HAVE_CFS_SIZE_ROUND #define HAVE_CFS_SIZE_ROUND
#endif #endif
static inline int cfs_size_round16(int val) static inline size_t cfs_size_round16(int val)
{ {
return (val + 0xf) & (~0xf); return (val + 0xf) & (~0xf);
} }
static inline int cfs_size_round32(int val) static inline size_t cfs_size_round32(int val)
{ {
return (val + 0x1f) & (~0x1f); return (val + 0x1f) & (~0x1f);
} }
static inline int cfs_size_round0(int val) static inline size_t cfs_size_round0(int val)
{ {
if (!val) if (!val)
return 0; return 0;
...@@ -343,7 +343,7 @@ static inline int cfs_size_round0(int val) ...@@ -343,7 +343,7 @@ static inline int cfs_size_round0(int val)
static inline size_t cfs_round_strlen(char *fset) static inline size_t cfs_round_strlen(char *fset)
{ {
return (size_t)cfs_size_round((int)strlen(fset) + 1); return cfs_size_round((int)strlen(fset) + 1);
} }
#define LOGL(var, len, ptr) \ #define LOGL(var, len, ptr) \
......
...@@ -499,7 +499,7 @@ static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats, ...@@ -499,7 +499,7 @@ static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats,
int idx, int idx,
enum lprocfs_fields_flags field) enum lprocfs_fields_flags field)
{ {
int i; unsigned int i;
unsigned int num_cpu; unsigned int num_cpu;
unsigned long flags = 0; unsigned long flags = 0;
__u64 ret = 0; __u64 ret = 0;
......
...@@ -151,13 +151,11 @@ static inline void lustre_cfg_bufs_reset(struct lustre_cfg_bufs *bufs, char *nam ...@@ -151,13 +151,11 @@ static inline void lustre_cfg_bufs_reset(struct lustre_cfg_bufs *bufs, char *nam
lustre_cfg_bufs_set_string(bufs, 0, name); lustre_cfg_bufs_set_string(bufs, 0, name);
} }
static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index) static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, __u32 index)
{ {
int i; __u32 i;
int offset; size_t offset;
int bufcount; __u32 bufcount;
LASSERT(index >= 0);
bufcount = lcfg->lcfg_bufcount; bufcount = lcfg->lcfg_bufcount;
if (index >= bufcount) if (index >= bufcount)
...@@ -172,7 +170,7 @@ static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index) ...@@ -172,7 +170,7 @@ static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index)
static inline void lustre_cfg_bufs_init(struct lustre_cfg_bufs *bufs, static inline void lustre_cfg_bufs_init(struct lustre_cfg_bufs *bufs,
struct lustre_cfg *lcfg) struct lustre_cfg *lcfg)
{ {
int i; __u32 i;
bufs->lcfg_bufcount = lcfg->lcfg_bufcount; bufs->lcfg_bufcount = lcfg->lcfg_bufcount;
for (i = 0; i < bufs->lcfg_bufcount; i++) { for (i = 0; i < bufs->lcfg_bufcount; i++) {
...@@ -181,7 +179,7 @@ static inline void lustre_cfg_bufs_init(struct lustre_cfg_bufs *bufs, ...@@ -181,7 +179,7 @@ static inline void lustre_cfg_bufs_init(struct lustre_cfg_bufs *bufs,
} }
} }
static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index) static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, __u32 index)
{ {
char *s; char *s;
...@@ -197,8 +195,8 @@ static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index) ...@@ -197,8 +195,8 @@ static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index)
* of data. Try to use the padding first though. * of data. Try to use the padding first though.
*/ */
if (s[lcfg->lcfg_buflens[index] - 1] != '\0') { if (s[lcfg->lcfg_buflens[index] - 1] != '\0') {
int last = min((int)lcfg->lcfg_buflens[index], size_t last = min((size_t)lcfg->lcfg_buflens[index],
cfs_size_round(lcfg->lcfg_buflens[index]) - 1); cfs_size_round(lcfg->lcfg_buflens[index]) - 1);
char lost = s[last]; char lost = s[last];
s[last] = '\0'; s[last] = '\0';
...@@ -210,10 +208,10 @@ static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index) ...@@ -210,10 +208,10 @@ static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index)
return s; return s;
} }
static inline int lustre_cfg_len(__u32 bufcount, __u32 *buflens) static inline __u32 lustre_cfg_len(__u32 bufcount, __u32 *buflens)
{ {
int i; __u32 i;
int len; __u32 len;
len = LCFG_HDR_SIZE(bufcount); len = LCFG_HDR_SIZE(bufcount);
for (i = 0; i < bufcount; i++) for (i = 0; i < bufcount; i++)
...@@ -254,7 +252,7 @@ static inline void lustre_cfg_free(struct lustre_cfg *lcfg) ...@@ -254,7 +252,7 @@ static inline void lustre_cfg_free(struct lustre_cfg *lcfg)
return; return;
} }
static inline int lustre_cfg_sanity_check(void *buf, int len) static inline int lustre_cfg_sanity_check(void *buf, size_t len)
{ {
struct lustre_cfg *lcfg = (struct lustre_cfg *)buf; struct lustre_cfg *lcfg = (struct lustre_cfg *)buf;
......
...@@ -55,7 +55,7 @@ struct lmv_stripe_md { ...@@ -55,7 +55,7 @@ struct lmv_stripe_md {
static inline bool static inline bool
lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2) lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
{ {
int idx; __u32 idx;
if (lsm1->lsm_md_magic != lsm2->lsm_md_magic || if (lsm1->lsm_md_magic != lsm2->lsm_md_magic ||
lsm1->lsm_md_stripe_count != lsm2->lsm_md_stripe_count || lsm1->lsm_md_stripe_count != lsm2->lsm_md_stripe_count ||
...@@ -92,7 +92,7 @@ static inline void lmv_free_memmd(struct lmv_stripe_md *lsm) ...@@ -92,7 +92,7 @@ static inline void lmv_free_memmd(struct lmv_stripe_md *lsm)
static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst, static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst,
const struct lmv_mds_md_v1 *lmv_src) const struct lmv_mds_md_v1 *lmv_src)
{ {
int i; __u32 i;
lmv_dst->lmv_magic = le32_to_cpu(lmv_src->lmv_magic); lmv_dst->lmv_magic = le32_to_cpu(lmv_src->lmv_magic);
lmv_dst->lmv_stripe_count = le32_to_cpu(lmv_src->lmv_stripe_count); lmv_dst->lmv_stripe_count = le32_to_cpu(lmv_src->lmv_stripe_count);
......
...@@ -1631,7 +1631,7 @@ static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req) ...@@ -1631,7 +1631,7 @@ static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
/** /**
* Returns 1 if request buffer at offset \a index was already swabbed * Returns 1 if request buffer at offset \a index was already swabbed
*/ */
static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index) static inline int lustre_req_swabbed(struct ptlrpc_request *req, size_t index)
{ {
LASSERT(index < sizeof(req->rq_req_swab_mask) * 8); LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
return req->rq_req_swab_mask & (1 << index); return req->rq_req_swab_mask & (1 << index);
...@@ -1640,7 +1640,7 @@ static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index) ...@@ -1640,7 +1640,7 @@ static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
/** /**
* Returns 1 if request reply buffer at offset \a index was already swabbed * Returns 1 if request reply buffer at offset \a index was already swabbed
*/ */
static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index) static inline int lustre_rep_swabbed(struct ptlrpc_request *req, size_t index)
{ {
LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8); LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
return req->rq_rep_swab_mask & (1 << index); return req->rq_rep_swab_mask & (1 << index);
...@@ -1665,7 +1665,8 @@ static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req) ...@@ -1665,7 +1665,8 @@ static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
/** /**
* Mark request buffer at offset \a index that it was already swabbed * Mark request buffer at offset \a index that it was already swabbed
*/ */
static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index) static inline void lustre_set_req_swabbed(struct ptlrpc_request *req,
size_t index)
{ {
LASSERT(index < sizeof(req->rq_req_swab_mask) * 8); LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
LASSERT((req->rq_req_swab_mask & (1 << index)) == 0); LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
...@@ -1675,7 +1676,8 @@ static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index) ...@@ -1675,7 +1676,8 @@ static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
/** /**
* Mark request reply buffer at offset \a index that it was already swabbed * Mark request reply buffer at offset \a index that it was already swabbed
*/ */
static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index) static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req,
size_t index)
{ {
LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8); LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0); LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
......
...@@ -222,11 +222,11 @@ struct client_obd { ...@@ -222,11 +222,11 @@ struct client_obd {
struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */ struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */
/* the grant values are protected by loi_list_lock below */ /* the grant values are protected by loi_list_lock below */
long cl_dirty_pages; /* all _dirty_ in pahges */ unsigned long cl_dirty_pages; /* all _dirty_ in pahges */
long cl_dirty_max_pages;/* allowed w/o rpc */ unsigned long cl_dirty_max_pages; /* allowed w/o rpc */
long cl_dirty_transit; /* dirty synchronous */ unsigned long cl_dirty_transit; /* dirty synchronous */
long cl_avail_grant; /* bytes of credit for ost */ unsigned long cl_avail_grant; /* bytes of credit for ost */
long cl_lost_grant; /* lost credits (trunc) */ unsigned long cl_lost_grant; /* lost credits (trunc) */
/* since we allocate grant by blocks, we don't know how many grant will /* since we allocate grant by blocks, we don't know how many grant will
* be used to add a page into cache. As a solution, we reserve maximum * be used to add a page into cache. As a solution, we reserve maximum
...@@ -268,13 +268,13 @@ struct client_obd { ...@@ -268,13 +268,13 @@ struct client_obd {
struct list_head cl_loi_hp_ready_list; struct list_head cl_loi_hp_ready_list;
struct list_head cl_loi_write_list; struct list_head cl_loi_write_list;
struct list_head cl_loi_read_list; struct list_head cl_loi_read_list;
int cl_r_in_flight; __u32 cl_r_in_flight;
int cl_w_in_flight; __u32 cl_w_in_flight;
/* just a sum of the loi/lop pending numbers to be exported by sysfs */ /* just a sum of the loi/lop pending numbers to be exported by sysfs */
atomic_t cl_pending_w_pages; atomic_t cl_pending_w_pages;
atomic_t cl_pending_r_pages; atomic_t cl_pending_r_pages;
__u32 cl_max_pages_per_rpc; __u32 cl_max_pages_per_rpc;
int cl_max_rpcs_in_flight; __u32 cl_max_rpcs_in_flight;
struct obd_histogram cl_read_rpc_hist; struct obd_histogram cl_read_rpc_hist;
struct obd_histogram cl_write_rpc_hist; struct obd_histogram cl_write_rpc_hist;
struct obd_histogram cl_read_page_hist; struct obd_histogram cl_read_page_hist;
...@@ -1183,8 +1183,8 @@ static inline void client_adjust_max_dirty(struct client_obd *cli) ...@@ -1183,8 +1183,8 @@ static inline void client_adjust_max_dirty(struct client_obd *cli)
cli->cl_dirty_max_pages = cli->cl_dirty_max_pages =
(OSC_MAX_DIRTY_DEFAULT * 1024 * 1024) >> PAGE_SHIFT; (OSC_MAX_DIRTY_DEFAULT * 1024 * 1024) >> PAGE_SHIFT;
else { else {
long dirty_max = cli->cl_max_rpcs_in_flight * unsigned long dirty_max = cli->cl_max_rpcs_in_flight *
cli->cl_max_pages_per_rpc; cli->cl_max_pages_per_rpc;
if (dirty_max > cli->cl_dirty_max_pages) if (dirty_max > cli->cl_dirty_max_pages)
cli->cl_dirty_max_pages = dirty_max; cli->cl_dirty_max_pages = dirty_max;
......
...@@ -823,9 +823,10 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, ...@@ -823,9 +823,10 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
cli->cl_dirty_pages, cli->cl_dirty_max_pages); cli->cl_dirty_pages, cli->cl_dirty_max_pages);
oa->o_undirty = 0; oa->o_undirty = 0;
} else { } else {
long max_in_flight = (cli->cl_max_pages_per_rpc << unsigned long max_in_flight;
PAGE_SHIFT) *
(cli->cl_max_rpcs_in_flight + 1); max_in_flight = (cli->cl_max_pages_per_rpc << PAGE_SHIFT) *
(cli->cl_max_rpcs_in_flight + 1);
oa->o_undirty = max(cli->cl_dirty_max_pages << PAGE_SHIFT, oa->o_undirty = max(cli->cl_dirty_max_pages << PAGE_SHIFT,
max_in_flight); max_in_flight);
} }
...@@ -2048,7 +2049,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, ...@@ -2048,7 +2049,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
} }
spin_unlock(&cli->cl_loi_list_lock); spin_unlock(&cli->cl_loi_list_lock);
DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight", DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%dw in flight",
page_count, aa, cli->cl_r_in_flight, page_count, aa, cli->cl_r_in_flight,
cli->cl_w_in_flight); cli->cl_w_in_flight);
......
...@@ -1734,7 +1734,7 @@ EXPORT_SYMBOL(lustre_swab_mgs_target_info); ...@@ -1734,7 +1734,7 @@ EXPORT_SYMBOL(lustre_swab_mgs_target_info);
void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry) void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry)
{ {
int i; __u8 i;
__swab64s(&entry->mne_version); __swab64s(&entry->mne_version);
__swab32s(&entry->mne_instance); __swab32s(&entry->mne_instance);
...@@ -1823,7 +1823,7 @@ static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent) ...@@ -1823,7 +1823,7 @@ static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
void lustre_swab_fiemap(struct ll_user_fiemap *fiemap) void lustre_swab_fiemap(struct ll_user_fiemap *fiemap)
{ {
int i; __u32 i;
__swab64s(&fiemap->fm_start); __swab64s(&fiemap->fm_start);
__swab64s(&fiemap->fm_length); __swab64s(&fiemap->fm_length);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment