Commit cce3c2da authored by Oleg Drokin, committed by Greg Kroah-Hartman

staging/lustre/obdclass: Adjust NULL comparison codestyle

All instances of "x == NULL" are changed to "!x", and
"x != NULL" to "x".

Also remove some redundant assertions.
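
For illustration, the whole patch is a mechanical rewrite of pointer tests of the following form. This is a generic sketch using placeholder names (struct foo, do_thing, example_before/after); it is not a hunk taken from any file in this patch:

struct foo_ops {
	int (*do_thing)(struct foo *f);
};

struct foo {
	struct foo_ops *ops;
};

/* Before: explicit NULL comparisons. */
static int example_before(struct foo *f)
{
	if (f == NULL)
		return -ENOMEM;
	if (f->ops->do_thing != NULL)
		return f->ops->do_thing(f);
	return 0;
}

/* After: same logic, testing the pointer's truth value as preferred
 * by the kernel coding style. */
static int example_after(struct foo *f)
{
	if (!f)
		return -ENOMEM;
	if (f->ops->do_thing)
		return f->ops->do_thing(f);
	return 0;
}

The dropped assertions appear to be redundant NULL checks, e.g. LASSERT(slice != NULL) directly before slice->cpl_page is dereferenced, where a NULL pointer would oops anyway.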
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent eb17cc24
......@@ -104,7 +104,7 @@ static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header,
return old_size;
new = kmemdup(*header, new_size, GFP_NOFS);
if (unlikely(new == NULL))
if (unlikely(!new))
return -ENOMEM;
kfree(*header);
......@@ -124,7 +124,7 @@ static int lustre_ext_acl_xattr_reduce_space(ext_acl_xattr_header **header,
return 0;
new = kmemdup(*header, ext_size, GFP_NOFS);
if (unlikely(new == NULL))
if (unlikely(!new))
return -ENOMEM;
kfree(*header);
......@@ -149,7 +149,7 @@ lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size)
count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr);
new = kzalloc(esize, GFP_NOFS);
if (unlikely(new == NULL))
if (unlikely(!new))
return ERR_PTR(-ENOMEM);
new->a_count = cpu_to_le32(count);
......@@ -180,7 +180,7 @@ int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
return -EINVAL;
new = kzalloc(size, GFP_NOFS);
if (unlikely(new == NULL))
if (unlikely(!new))
return -ENOMEM;
new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
......@@ -300,7 +300,7 @@ lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
new = kzalloc(ext_size, GFP_NOFS);
if (unlikely(new == NULL))
if (unlikely(!new))
return ERR_PTR(-ENOMEM);
for (i = 0, j = 0; i < posix_count; i++) {
......
......@@ -96,7 +96,7 @@ static int cl_lock_invariant(const struct lu_env *env,
result = atomic_read(&lock->cll_ref) > 0 &&
cl_lock_invariant_trusted(env, lock);
if (!result && env != NULL)
if (!result && env)
CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
return result;
}
......@@ -288,7 +288,7 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
LINVRNT(cl_lock_invariant(env, lock));
obj = lock->cll_descr.cld_obj;
LINVRNT(obj != NULL);
LINVRNT(obj);
CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
atomic_read(&lock->cll_ref), lock, RETIP);
......@@ -362,7 +362,7 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
struct lu_object_header *head;
lock = kmem_cache_alloc(cl_lock_kmem, GFP_NOFS | __GFP_ZERO);
if (lock != NULL) {
if (lock) {
atomic_set(&lock->cll_ref, 1);
lock->cll_descr = *descr;
lock->cll_state = CLS_NEW;
......@@ -461,7 +461,7 @@ static int cl_lock_fits_into(const struct lu_env *env,
LINVRNT(cl_lock_invariant_trusted(env, lock));
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_fits_into != NULL &&
if (slice->cls_ops->clo_fits_into &&
!slice->cls_ops->clo_fits_into(env, slice, need, io))
return 0;
}
......@@ -524,14 +524,14 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env,
lock = cl_lock_lookup(env, obj, io, need);
spin_unlock(&head->coh_lock_guard);
if (lock == NULL) {
if (!lock) {
lock = cl_lock_alloc(env, obj, io, need);
if (!IS_ERR(lock)) {
struct cl_lock *ghost;
spin_lock(&head->coh_lock_guard);
ghost = cl_lock_lookup(env, obj, io, need);
if (ghost == NULL) {
if (!ghost) {
cl_lock_get_trust(lock);
list_add_tail(&lock->cll_linkage,
&head->coh_locks);
......@@ -572,7 +572,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
spin_lock(&head->coh_lock_guard);
lock = cl_lock_lookup(env, obj, io, need);
spin_unlock(&head->coh_lock_guard);
if (lock == NULL)
if (!lock)
return NULL;
cl_lock_mutex_get(env, lock);
......@@ -584,7 +584,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
cl_lock_put(env, lock);
lock = NULL;
}
} while (lock == NULL);
} while (!lock);
cl_lock_hold_add(env, lock, scope, source);
cl_lock_user_add(env, lock);
......@@ -775,7 +775,7 @@ static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
lock->cll_flags |= CLF_CANCELLED;
list_for_each_entry_reverse(slice, &lock->cll_layers,
cls_linkage) {
if (slice->cls_ops->clo_cancel != NULL)
if (slice->cls_ops->clo_cancel)
slice->cls_ops->clo_cancel(env, slice);
}
}
......@@ -812,7 +812,7 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
*/
list_for_each_entry_reverse(slice, &lock->cll_layers,
cls_linkage) {
if (slice->cls_ops->clo_delete != NULL)
if (slice->cls_ops->clo_delete)
slice->cls_ops->clo_delete(env, slice);
}
/*
......@@ -974,7 +974,7 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
LINVRNT(cl_lock_invariant(env, lock));
list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
if (slice->cls_ops->clo_state != NULL)
if (slice->cls_ops->clo_state)
slice->cls_ops->clo_state(env, slice, state);
wake_up_all(&lock->cll_wq);
}
......@@ -1039,7 +1039,7 @@ static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
result = -ENOSYS;
list_for_each_entry_reverse(slice, &lock->cll_layers,
cls_linkage) {
if (slice->cls_ops->clo_unuse != NULL) {
if (slice->cls_ops->clo_unuse) {
result = slice->cls_ops->clo_unuse(env, slice);
if (result != 0)
break;
......@@ -1072,7 +1072,7 @@ int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
result = -ENOSYS;
state = cl_lock_intransit(env, lock);
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_use != NULL) {
if (slice->cls_ops->clo_use) {
result = slice->cls_ops->clo_use(env, slice);
if (result != 0)
break;
......@@ -1125,7 +1125,7 @@ static int cl_enqueue_kick(const struct lu_env *env,
result = -ENOSYS;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_enqueue != NULL) {
if (slice->cls_ops->clo_enqueue) {
result = slice->cls_ops->clo_enqueue(env,
slice, io, flags);
if (result != 0)
......@@ -1215,7 +1215,7 @@ int cl_lock_enqueue_wait(const struct lu_env *env,
LASSERT(cl_lock_is_mutexed(lock));
LASSERT(lock->cll_state == CLS_QUEUING);
LASSERT(lock->cll_conflict != NULL);
LASSERT(lock->cll_conflict);
conflict = lock->cll_conflict;
lock->cll_conflict = NULL;
......@@ -1258,7 +1258,7 @@ static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
do {
result = cl_enqueue_try(env, lock, io, enqflags);
if (result == CLO_WAIT) {
if (lock->cll_conflict != NULL)
if (lock->cll_conflict)
result = cl_lock_enqueue_wait(env, lock, 1);
else
result = cl_lock_state_wait(env, lock);
......@@ -1416,7 +1416,7 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
result = -ENOSYS;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_wait != NULL) {
if (slice->cls_ops->clo_wait) {
result = slice->cls_ops->clo_wait(env, slice);
if (result != 0)
break;
......@@ -1487,7 +1487,7 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
pound = 0;
list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_weigh != NULL) {
if (slice->cls_ops->clo_weigh) {
ounce = slice->cls_ops->clo_weigh(env, slice);
pound += ounce;
if (pound < ounce) /* over-weight^Wflow */
......@@ -1523,7 +1523,7 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
LINVRNT(cl_lock_invariant(env, lock));
list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_modify != NULL) {
if (slice->cls_ops->clo_modify) {
result = slice->cls_ops->clo_modify(env, slice, desc);
if (result != 0)
return result;
......@@ -1584,7 +1584,7 @@ int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
result = cl_lock_enclosure(env, lock, closure);
if (result == 0) {
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_closure != NULL) {
if (slice->cls_ops->clo_closure) {
result = slice->cls_ops->clo_closure(env, slice,
closure);
if (result != 0)
......@@ -1820,7 +1820,6 @@ static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
slice = cl_page_at(page, dtype);
LASSERT(slice != NULL);
return slice->cpl_page->cp_index;
}
......@@ -1840,7 +1839,7 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
/* refresh non-overlapped index */
tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
lock, 1, 0);
if (tmp != NULL) {
if (tmp) {
/* Cache the first-non-overlapped index so as to skip
* all pages within [index, clt_fn_index). This
* is safe because if tmp lock is canceled, it will
......@@ -1950,7 +1949,7 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
* already destroyed (as otherwise they will be left unprotected).
*/
LASSERT(ergo(!cancel,
head->coh_tree.rnode == NULL && head->coh_pages == 0));
!head->coh_tree.rnode && head->coh_pages == 0));
spin_lock(&head->coh_lock_guard);
while (!list_empty(&head->coh_locks)) {
......@@ -2194,7 +2193,7 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
(*printer)(env, cookie, " %s@%p: ",
slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
slice);
if (slice->cls_ops->clo_print != NULL)
if (slice->cls_ops->clo_print)
slice->cls_ops->clo_print(env, cookie, printer, slice);
(*printer)(env, cookie, "\n");
}
......
......@@ -152,7 +152,7 @@ struct cl_object *cl_object_top(struct cl_object *o)
struct cl_object_header *hdr = cl_object_header(o);
struct cl_object *top;
while (hdr->coh_parent != NULL)
while (hdr->coh_parent)
hdr = hdr->coh_parent;
top = lu2cl(lu_object_top(&hdr->coh_lu));
......@@ -217,7 +217,7 @@ int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
top = obj->co_lu.lo_header;
result = 0;
list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
if (obj->co_ops->coo_attr_get != NULL) {
if (obj->co_ops->coo_attr_get) {
result = obj->co_ops->coo_attr_get(env, obj, attr);
if (result != 0) {
if (result > 0)
......@@ -249,7 +249,7 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
result = 0;
list_for_each_entry_reverse(obj, &top->loh_layers,
co_lu.lo_linkage) {
if (obj->co_ops->coo_attr_set != NULL) {
if (obj->co_ops->coo_attr_set) {
result = obj->co_ops->coo_attr_set(env, obj, attr, v);
if (result != 0) {
if (result > 0)
......@@ -280,7 +280,7 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
result = 0;
list_for_each_entry_reverse(obj, &top->loh_layers,
co_lu.lo_linkage) {
if (obj->co_ops->coo_glimpse != NULL) {
if (obj->co_ops->coo_glimpse) {
result = obj->co_ops->coo_glimpse(env, obj, lvb);
if (result != 0)
break;
......@@ -306,7 +306,7 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
top = obj->co_lu.lo_header;
result = 0;
list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
if (obj->co_ops->coo_conf_set != NULL) {
if (obj->co_ops->coo_conf_set) {
result = obj->co_ops->coo_conf_set(env, obj, conf);
if (result != 0)
break;
......@@ -328,7 +328,7 @@ void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
struct cl_object_header *hdr;
hdr = cl_object_header(obj);
LASSERT(hdr->coh_tree.rnode == NULL);
LASSERT(!hdr->coh_tree.rnode);
LASSERT(hdr->coh_pages == 0);
set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
......@@ -541,7 +541,7 @@ static void cl_env_init0(struct cl_env *cle, void *debug)
{
LASSERT(cle->ce_ref == 0);
LASSERT(cle->ce_magic == &cl_env_init0);
LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);
LASSERT(!cle->ce_debug && !cle->ce_owner);
cle->ce_ref = 1;
cle->ce_debug = debug;
......@@ -576,7 +576,7 @@ static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
{
struct cl_env *cle = cl_env_hops_obj(hn);
LASSERT(cle->ce_owner != NULL);
LASSERT(cle->ce_owner);
return (key == cle->ce_owner);
}
......@@ -610,7 +610,7 @@ static inline void cl_env_attach(struct cl_env *cle)
if (cle) {
int rc;
LASSERT(cle->ce_owner == NULL);
LASSERT(!cle->ce_owner);
cle->ce_owner = (void *) (long) current->pid;
rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
&cle->ce_node);
......@@ -638,7 +638,7 @@ static int cl_env_store_init(void)
CFS_HASH_MAX_THETA,
&cl_env_hops,
CFS_HASH_RW_BKTLOCK);
return cl_env_hash != NULL ? 0 : -ENOMEM;
return cl_env_hash ? 0 : -ENOMEM;
}
static void cl_env_store_fini(void)
......@@ -648,7 +648,7 @@ static void cl_env_store_fini(void)
static inline struct cl_env *cl_env_detach(struct cl_env *cle)
{
if (cle == NULL)
if (!cle)
cle = cl_env_fetch();
if (cle && cle->ce_owner)
......@@ -663,7 +663,7 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
struct cl_env *cle;
cle = kmem_cache_alloc(cl_env_kmem, GFP_NOFS | __GFP_ZERO);
if (cle != NULL) {
if (cle) {
int rc;
INIT_LIST_HEAD(&cle->ce_linkage);
......@@ -717,7 +717,7 @@ static struct lu_env *cl_env_peek(int *refcheck)
env = NULL;
cle = cl_env_fetch();
if (cle != NULL) {
if (cle) {
CL_ENV_INC(hit);
env = &cle->ce_lu;
*refcheck = ++cle->ce_ref;
......@@ -742,7 +742,7 @@ struct lu_env *cl_env_get(int *refcheck)
struct lu_env *env;
env = cl_env_peek(refcheck);
if (env == NULL) {
if (!env) {
env = cl_env_new(lu_context_tags_default,
lu_session_tags_default,
__builtin_return_address(0));
......@@ -769,7 +769,7 @@ struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
{
struct lu_env *env;
LASSERT(cl_env_peek(refcheck) == NULL);
LASSERT(!cl_env_peek(refcheck));
env = cl_env_new(tags, tags, __builtin_return_address(0));
if (!IS_ERR(env)) {
struct cl_env *cle;
......@@ -784,7 +784,7 @@ EXPORT_SYMBOL(cl_env_alloc);
static void cl_env_exit(struct cl_env *cle)
{
LASSERT(cle->ce_owner == NULL);
LASSERT(!cle->ce_owner);
lu_context_exit(&cle->ce_lu.le_ctx);
lu_context_exit(&cle->ce_ses);
}
......@@ -803,7 +803,7 @@ void cl_env_put(struct lu_env *env, int *refcheck)
cle = cl_env_container(env);
LASSERT(cle->ce_ref > 0);
LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));
LASSERT(ergo(refcheck, cle->ce_ref == *refcheck));
CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
if (--cle->ce_ref == 0) {
......@@ -878,7 +878,7 @@ struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
nest->cen_cookie = NULL;
env = cl_env_peek(&nest->cen_refcheck);
if (env != NULL) {
if (env) {
if (!cl_io_is_going(env))
return env;
cl_env_put(env, &nest->cen_refcheck);
......@@ -930,14 +930,12 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
const char *typename;
struct lu_device *d;
LASSERT(ldt != NULL);
typename = ldt->ldt_name;
d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
if (!IS_ERR(d)) {
int rc;
if (site != NULL)
if (site)
d->ld_site = site;
rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
if (rc == 0) {
......
......@@ -341,7 +341,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
}
if (data->ioc_dev == OBD_DEV_BY_DEVNAME) {
if (data->ioc_inllen4 <= 0 || data->ioc_inlbuf4 == NULL) {
if (data->ioc_inllen4 <= 0 || !data->ioc_inlbuf4) {
err = -EINVAL;
goto out;
}
......@@ -358,7 +358,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
goto out;
}
if (obd == NULL) {
if (!obd) {
CERROR("OBD ioctl : No Device %d\n", data->ioc_dev);
err = -EINVAL;
goto out;
......
......@@ -70,17 +70,16 @@ static struct obd_device *obd_device_alloc(void)
struct obd_device *obd;
obd = kmem_cache_alloc(obd_device_cachep, GFP_NOFS | __GFP_ZERO);
if (obd != NULL)
if (obd)
obd->obd_magic = OBD_DEVICE_MAGIC;
return obd;
}
static void obd_device_free(struct obd_device *obd)
{
LASSERT(obd != NULL);
LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08x != %08x\n",
obd, obd->obd_magic, OBD_DEVICE_MAGIC);
if (obd->obd_namespace != NULL) {
if (obd->obd_namespace) {
CERROR("obd %p: namespace %p was not properly cleaned up (obd_force=%d)!\n",
obd, obd->obd_namespace, obd->obd_force);
LBUG();
......@@ -194,7 +193,7 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
goto failed;
}
if (ldt != NULL) {
if (ldt) {
type->typ_lu = ldt;
rc = lu_device_type_init(ldt);
if (rc != 0)
......@@ -356,7 +355,7 @@ void class_release_dev(struct obd_device *obd)
obd, obd->obd_magic, OBD_DEVICE_MAGIC);
LASSERTF(obd == obd_devs[obd->obd_minor], "obd %p != obd_devs[%d] %p\n",
obd, obd->obd_minor, obd_devs[obd->obd_minor]);
LASSERT(obd_type != NULL);
LASSERT(obd_type);
CDEBUG(D_INFO, "Release obd device %s at %d obd_type name =%s\n",
obd->obd_name, obd->obd_minor, obd->obd_type->typ_name);
......@@ -650,7 +649,7 @@ static void class_export_destroy(struct obd_export *exp)
struct obd_device *obd = exp->exp_obd;
LASSERT_ATOMIC_ZERO(&exp->exp_refcount);
LASSERT(obd != NULL);
LASSERT(obd);
CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp,
exp->exp_client_uuid.uuid, obd->obd_name);
......@@ -690,7 +689,6 @@ EXPORT_SYMBOL(class_export_get);
void class_export_put(struct obd_export *exp)
{
LASSERT(exp != NULL);
LASSERT_ATOMIC_GT_LT(&exp->exp_refcount, 0, LI_POISON);
CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp,
atomic_read(&exp->exp_refcount) - 1);
......@@ -942,7 +940,7 @@ EXPORT_SYMBOL(class_new_import);
void class_destroy_import(struct obd_import *import)
{
LASSERT(import != NULL);
LASSERT(import);
LASSERT(import != LP_POISON);
class_handle_unhash(&import->imp_handle);
......@@ -962,8 +960,7 @@ void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
LASSERT(lock->l_exp_refs_nr >= 0);
if (lock->l_exp_refs_target != NULL &&
lock->l_exp_refs_target != exp) {
if (lock->l_exp_refs_target && lock->l_exp_refs_target != exp) {
LCONSOLE_WARN("setting export %p for lock %p which already has export %p\n",
exp, lock, lock->l_exp_refs_target);
}
......@@ -1005,9 +1002,9 @@ int class_connect(struct lustre_handle *conn, struct obd_device *obd,
{
struct obd_export *export;
LASSERT(conn != NULL);
LASSERT(obd != NULL);
LASSERT(cluuid != NULL);
LASSERT(conn);
LASSERT(obd);
LASSERT(cluuid);
export = class_new_export(obd, cluuid);
if (IS_ERR(export))
......@@ -1133,14 +1130,14 @@ static void obd_zombie_impexp_cull(void)
spin_unlock(&obd_zombie_impexp_lock);
if (import != NULL) {
if (import) {
class_import_destroy(import);
spin_lock(&obd_zombie_impexp_lock);
zombies_count--;
spin_unlock(&obd_zombie_impexp_lock);
}
if (export != NULL) {
if (export) {
class_export_destroy(export);
spin_lock(&obd_zombie_impexp_lock);
zombies_count--;
......@@ -1148,7 +1145,7 @@ static void obd_zombie_impexp_cull(void)
}
cond_resched();
} while (import != NULL || export != NULL);
} while (import || export);
}
static struct completion obd_zombie_start;
......
......@@ -106,7 +106,7 @@ int obd_ioctl_getdata(char **buf, int *len, void __user *arg)
* obdfilter-survey is an example, which relies on ioctl. So we'd
* better avoid vmalloc on ioctl path. LU-66 */
*buf = libcfs_kvzalloc(hdr.ioc_len, GFP_NOFS);
if (*buf == NULL) {
if (!*buf) {
CERROR("Cannot allocate control buffer of len %d\n",
hdr.ioc_len);
return -EINVAL;
......@@ -454,8 +454,7 @@ int class_procfs_init(void)
int class_procfs_clean(void)
{
if (debugfs_lustre_root != NULL)
debugfs_remove_recursive(debugfs_lustre_root);
debugfs_remove_recursive(debugfs_lustre_root);
debugfs_lustre_root = NULL;
......
......@@ -76,8 +76,6 @@ static struct llog_handle *llog_alloc_handle(void)
*/
static void llog_free_handle(struct llog_handle *loghandle)
{
LASSERT(loghandle != NULL);
/* failed llog_init_handle */
if (!loghandle->lgh_hdr)
goto out;
......@@ -115,7 +113,7 @@ static int llog_read_header(const struct lu_env *env,
if (rc)
return rc;
if (lop->lop_read_header == NULL)
if (!lop->lop_read_header)
return -EOPNOTSUPP;
rc = lop->lop_read_header(env, handle);
......@@ -144,7 +142,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
struct llog_log_hdr *llh;
int rc;
LASSERT(handle->lgh_hdr == NULL);
LASSERT(!handle->lgh_hdr);
llh = kzalloc(sizeof(*llh), GFP_NOFS);
if (!llh)
......@@ -228,11 +226,11 @@ static int llog_process_thread(void *arg)
return 0;
}
if (cd != NULL) {
if (cd) {
last_called_index = cd->lpcd_first_idx;
index = cd->lpcd_first_idx + 1;
}
if (cd != NULL && cd->lpcd_last_idx)
if (cd && cd->lpcd_last_idx)
last_index = cd->lpcd_last_idx;
else
last_index = LLOG_BITMAP_BYTES * 8 - 1;
......@@ -328,7 +326,7 @@ static int llog_process_thread(void *arg)
}
out:
if (cd != NULL)
if (cd)
cd->lpcd_last_idx = last_called_index;
kfree(buf);
......@@ -419,13 +417,13 @@ int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt,
LASSERT(ctxt);
LASSERT(ctxt->loc_logops);
if (ctxt->loc_logops->lop_open == NULL) {
if (!ctxt->loc_logops->lop_open) {
*lgh = NULL;
return -EOPNOTSUPP;
}
*lgh = llog_alloc_handle();
if (*lgh == NULL)
if (!*lgh)
return -ENOMEM;
(*lgh)->lgh_ctxt = ctxt;
(*lgh)->lgh_logops = ctxt->loc_logops;
......@@ -452,7 +450,7 @@ int llog_close(const struct lu_env *env, struct llog_handle *loghandle)
rc = llog_handle2ops(loghandle, &lop);
if (rc)
goto out;
if (lop->lop_close == NULL) {
if (!lop->lop_close) {
rc = -EOPNOTSUPP;
goto out;
}
......
......@@ -69,7 +69,7 @@ static int llog_cat_id2handle(const struct lu_env *env,
struct llog_handle *loghandle;
int rc = 0;
if (cathandle == NULL)
if (!cathandle)
return -EBADF;
down_write(&cathandle->lgh_lock);
......
......@@ -110,11 +110,8 @@ int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt)
struct obd_llog_group *olg;
int rc, idx;
LASSERT(ctxt != NULL);
LASSERT(ctxt != LP_POISON);
olg = ctxt->loc_olg;
LASSERT(olg != NULL);
LASSERT(olg);
LASSERT(olg != LP_POISON);
idx = ctxt->loc_idx;
......@@ -151,7 +148,7 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd,
if (index < 0 || index >= LLOG_MAX_CTXTS)
return -EINVAL;
LASSERT(olg != NULL);
LASSERT(olg);
ctxt = llog_new_ctxt(obd);
if (!ctxt)
......
......@@ -48,7 +48,7 @@ void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount)
int smp_id;
unsigned long flags = 0;
if (stats == NULL)
if (!stats)
return;
LASSERTF(0 <= idx && idx < stats->ls_num,
......@@ -96,7 +96,7 @@ void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount)
int smp_id;
unsigned long flags = 0;
if (stats == NULL)
if (!stats)
return;
LASSERTF(0 <= idx && idx < stats->ls_num,
......
......@@ -109,7 +109,7 @@ int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
__u64 mask = 1;
int i, ret = 0;
for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
for (i = 0; obd_connect_names[i]; i++, mask <<= 1) {
if (flags & mask)
ret += snprintf(page + ret, count - ret, "%s%s",
ret ? sep : "", obd_connect_names[i]);
......@@ -199,7 +199,7 @@ int lprocfs_write_frac_helper(const char __user *buffer, unsigned long count,
if (pbuf == end)
return -EINVAL;
if (end != NULL && *end == '.') {
if (end && *end == '.') {
int temp_val, pow = 1;
int i;
......@@ -247,7 +247,7 @@ struct dentry *ldebugfs_add_simple(struct dentry *root,
struct dentry *entry;
umode_t mode = 0;
if (root == NULL || name == NULL || fops == NULL)
if (!root || !name || !fops)
return ERR_PTR(-EINVAL);
if (fops->read)
......@@ -272,7 +272,7 @@ int ldebugfs_add_vars(struct dentry *parent,
if (IS_ERR_OR_NULL(parent) || IS_ERR_OR_NULL(list))
return -EINVAL;
while (list->name != NULL) {
while (list->name) {
struct dentry *entry;
umode_t mode = 0;
......@@ -491,7 +491,7 @@ int lprocfs_rd_server_uuid(struct seq_file *m, void *data)
char *imp_state_name = NULL;
int rc;
LASSERT(obd != NULL);
LASSERT(obd);
rc = lprocfs_climp_check(obd);
if (rc)
return rc;
......@@ -514,7 +514,7 @@ int lprocfs_rd_conn_uuid(struct seq_file *m, void *data)
struct ptlrpc_connection *conn;
int rc;
LASSERT(obd != NULL);
LASSERT(obd);
rc = lprocfs_climp_check(obd);
if (rc)
......@@ -543,7 +543,7 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
memset(cnt, 0, sizeof(*cnt));
if (stats == NULL) {
if (!stats) {
/* set count to 1 to avoid divide-by-zero errs in callers */
cnt->lc_count = 1;
return;
......@@ -554,7 +554,7 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
for (i = 0; i < num_entry; i++) {
if (stats->ls_percpu[i] == NULL)
if (!stats->ls_percpu[i])
continue;
percpu_cntr = lprocfs_stats_counter_get(stats, i, idx);
......@@ -604,7 +604,7 @@ static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep
int i;
bool first = true;
for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
for (i = 0; obd_connect_names[i]; i++, mask <<= 1) {
if (flags & mask) {
seq_printf(m, "%s%s",
first ? sep : "", obd_connect_names[i]);
......@@ -629,7 +629,7 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
int rw = 0;
int rc;
LASSERT(obd != NULL);
LASSERT(obd);
rc = lprocfs_climp_check(obd);
if (rc)
return rc;
......@@ -665,7 +665,7 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
seq_printf(m, "%s%s", j ? ", " : "", nidstr);
j++;
}
if (imp->imp_connection != NULL)
if (imp->imp_connection)
libcfs_nid2str_r(imp->imp_connection->c_peer.nid,
nidstr, sizeof(nidstr));
else
......@@ -682,7 +682,7 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
atomic_read(&imp->imp_inval_count));
spin_unlock(&imp->imp_lock);
if (obd->obd_svc_stats == NULL)
if (!obd->obd_svc_stats)
goto out_climp;
header = &obd->obd_svc_stats->ls_cnt_header[PTLRPC_REQWAIT_CNTR];
......@@ -779,7 +779,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data)
struct obd_import *imp;
int j, k, rc;
LASSERT(obd != NULL);
LASSERT(obd);
rc = lprocfs_climp_check(obd);
if (rc)
return rc;
......@@ -825,7 +825,7 @@ int lprocfs_rd_timeouts(struct seq_file *m, void *data)
struct dhms ts;
int i, rc;
LASSERT(obd != NULL);
LASSERT(obd);
rc = lprocfs_climp_check(obd);
if (rc)
return rc;
......@@ -967,12 +967,12 @@ int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
unsigned long flags = 0;
int i;
LASSERT(stats->ls_percpu[cpuid] == NULL);
LASSERT(!stats->ls_percpu[cpuid]);
LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0);
percpusize = lprocfs_stats_counter_size(stats);
LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[cpuid], percpusize);
if (stats->ls_percpu[cpuid] != NULL) {
if (stats->ls_percpu[cpuid]) {
rc = 0;
if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) {
if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
......@@ -1017,7 +1017,7 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
/* alloc percpu pointers for all possible cpu slots */
LIBCFS_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
if (stats == NULL)
if (!stats)
return NULL;
stats->ls_num = num;
......@@ -1027,14 +1027,14 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
/* alloc num of counter headers */
LIBCFS_ALLOC(stats->ls_cnt_header,
stats->ls_num * sizeof(struct lprocfs_counter_header));
if (stats->ls_cnt_header == NULL)
if (!stats->ls_cnt_header)
goto fail;
if ((flags & LPROCFS_STATS_FLAG_NOPERCPU) != 0) {
/* contains only one set counters */
percpusize = lprocfs_stats_counter_size(stats);
LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[0], percpusize);
if (stats->ls_percpu[0] == NULL)
if (!stats->ls_percpu[0])
goto fail;
stats->ls_biggest_alloc_num = 1;
} else if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) {
......@@ -1059,7 +1059,7 @@ void lprocfs_free_stats(struct lprocfs_stats **statsh)
unsigned int percpusize;
unsigned int i;
if (stats == NULL || stats->ls_num == 0)
if (!stats || stats->ls_num == 0)
return;
*statsh = NULL;
......@@ -1070,9 +1070,9 @@ void lprocfs_free_stats(struct lprocfs_stats **statsh)
percpusize = lprocfs_stats_counter_size(stats);
for (i = 0; i < num_entry; i++)
if (stats->ls_percpu[i] != NULL)
if (stats->ls_percpu[i])
LIBCFS_FREE(stats->ls_percpu[i], percpusize);
if (stats->ls_cnt_header != NULL)
if (stats->ls_cnt_header)
LIBCFS_FREE(stats->ls_cnt_header, stats->ls_num *
sizeof(struct lprocfs_counter_header));
LIBCFS_FREE(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
......@@ -1090,7 +1090,7 @@ void lprocfs_clear_stats(struct lprocfs_stats *stats)
num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
for (i = 0; i < num_entry; i++) {
if (stats->ls_percpu[i] == NULL)
if (!stats->ls_percpu[i])
continue;
for (j = 0; j < stats->ls_num; j++) {
percpu_cntr = lprocfs_stats_counter_get(stats, i, j);
......@@ -1230,10 +1230,8 @@ void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
unsigned int i;
unsigned int num_cpu;
LASSERT(stats != NULL);
header = &stats->ls_cnt_header[index];
LASSERTF(header != NULL, "Failed to allocate stats header:[%d]%s/%s\n",
LASSERTF(header, "Failed to allocate stats header:[%d]%s/%s\n",
index, name, units);
header->lc_config = conf;
......@@ -1242,7 +1240,7 @@ void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
for (i = 0; i < num_cpu; ++i) {
if (stats->ls_percpu[i] == NULL)
if (!stats->ls_percpu[i])
continue;
percpu_cntr = lprocfs_stats_counter_get(stats, i, index);
percpu_cntr->lc_count = 0;
......@@ -1270,7 +1268,7 @@ __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
{
__s64 ret = 0;
if (lc == NULL || header == NULL)
if (!lc || !header)
return 0;
switch (field) {
......@@ -1412,7 +1410,7 @@ char *lprocfs_find_named_value(const char *buffer, const char *name,
/* there is no strnstr() in rhel5 and ubuntu kernels */
val = lprocfs_strnstr(buffer, name, buflen);
if (val == NULL)
if (!val)
return (char *)buffer;
val += strlen(name); /* skip prefix */
......
......@@ -65,7 +65,7 @@ void class_handle_hash(struct portals_handle *h,
{
struct handle_bucket *bucket;
LASSERT(h != NULL);
LASSERT(h);
LASSERT(list_empty(&h->h_link));
/*
......@@ -140,7 +140,7 @@ void *class_handle2object(__u64 cookie)
struct portals_handle *h;
void *retval = NULL;
LASSERT(handle_hash != NULL);
LASSERT(handle_hash);
/* Be careful when you want to change this code. See the
* rcu_read_lock() definition on top this file. - jxiong */
......@@ -170,7 +170,7 @@ void class_handle_free_cb(struct rcu_head *rcu)
struct portals_handle *h = RCU2HANDLE(rcu);
void *ptr = (void *)(unsigned long)h->h_cookie;
if (h->h_ops->hop_free != NULL)
if (h->h_ops->hop_free)
h->h_ops->hop_free(ptr, h->h_size);
else
kfree(ptr);
......@@ -183,11 +183,11 @@ int class_handle_init(void)
struct timespec64 ts;
int seed[2];
LASSERT(handle_hash == NULL);
LASSERT(!handle_hash);
handle_hash = libcfs_kvzalloc(sizeof(*bucket) * HANDLE_HASH_SIZE,
GFP_NOFS);
if (handle_hash == NULL)
if (!handle_hash)
return -ENOMEM;
spin_lock_init(&handle_base_lock);
......@@ -234,7 +234,7 @@ void class_handle_cleanup(void)
{
int count;
LASSERT(handle_hash != NULL);
LASSERT(handle_hash);
count = cleanup_all_handles();
......
......@@ -151,7 +151,7 @@ int class_del_uuid(const char *uuid)
struct uuid_nid_data *data;
spin_lock(&g_uuid_lock);
if (uuid != NULL) {
if (uuid) {
struct obd_uuid tmp;
obd_str2uuid(&tmp, uuid);
......@@ -165,7 +165,7 @@ int class_del_uuid(const char *uuid)
list_splice_init(&g_uuid_list, &deathrow);
spin_unlock(&g_uuid_lock);
if (uuid != NULL && list_empty(&deathrow)) {
if (uuid && list_empty(&deathrow)) {
CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid);
return -EINVAL;
}
......
......@@ -210,7 +210,7 @@ static int class_attach(struct lustre_cfg *lcfg)
name, typename, rc);
goto out;
}
LASSERTF(obd != NULL, "Cannot get obd device %s of type %s\n",
LASSERTF(obd, "Cannot get obd device %s of type %s\n",
name, typename);
LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC,
"obd %p obd_magic %08X != %08X\n",
......@@ -272,9 +272,9 @@ static int class_attach(struct lustre_cfg *lcfg)
obd->obd_minor, typename, atomic_read(&obd->obd_refcount));
return 0;
out:
if (obd != NULL) {
if (obd)
class_release_dev(obd);
}
return rc;
}
......@@ -286,7 +286,7 @@ static int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
int err = 0;
struct obd_export *exp;
LASSERT(obd != NULL);
LASSERT(obd);
LASSERTF(obd == class_num2obd(obd->obd_minor),
"obd %p != obd_devs[%d] %p\n",
obd, obd->obd_minor, class_num2obd(obd->obd_minor));
......@@ -1183,7 +1183,7 @@ int class_config_llog_handler(const struct lu_env *env,
/* we override the llog's uuid for clients, to insure they
are unique */
if (clli && clli->cfg_instance != NULL &&
if (clli && clli->cfg_instance &&
lcfg->lcfg_command == LCFG_ATTACH) {
lustre_cfg_bufs_set_string(&bufs, 2,
clli->cfg_uuid.uuid);
......@@ -1270,7 +1270,7 @@ int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
if (cfg) {
cd.lpcd_first_idx = cfg->cfg_last_idx;
callback = cfg->cfg_callback;
LASSERT(callback != NULL);
LASSERT(callback);
} else {
callback = class_config_llog_handler;
}
......
......@@ -518,13 +518,12 @@ static int lustre_free_lsi(struct super_block *sb)
{
struct lustre_sb_info *lsi = s2lsi(sb);
LASSERT(lsi != NULL);
CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi);
/* someone didn't call server_put_mount. */
LASSERT(atomic_read(&lsi->lsi_mounts) == 0);
if (lsi->lsi_lmd != NULL) {
if (lsi->lsi_lmd) {
kfree(lsi->lsi_lmd->lmd_dev);
kfree(lsi->lsi_lmd->lmd_profile);
kfree(lsi->lsi_lmd->lmd_mgssec);
......@@ -538,7 +537,7 @@ static int lustre_free_lsi(struct super_block *sb)
kfree(lsi->lsi_lmd);
}
LASSERT(lsi->lsi_llsbi == NULL);
LASSERT(!lsi->lsi_llsbi);
kfree(lsi);
s2lsi_nocast(sb) = NULL;
......@@ -551,8 +550,6 @@ static int lustre_put_lsi(struct super_block *sb)
{
struct lustre_sb_info *lsi = s2lsi(sb);
LASSERT(lsi != NULL);
CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts));
if (atomic_dec_and_test(&lsi->lsi_mounts)) {
lustre_free_lsi(sb);
......@@ -588,12 +585,12 @@ static int server_name2fsname(const char *svname, char *fsname,
if (dash == svname)
return -EINVAL;
if (fsname != NULL) {
if (fsname) {
strncpy(fsname, svname, dash - svname);
fsname[dash - svname] = '\0';
}
if (endptr != NULL)
if (endptr)
*endptr = dash;
return 0;
......@@ -627,18 +624,18 @@ static int server_name2index(const char *svname, __u32 *idx,
dash += 3;
if (strncmp(dash, "all", 3) == 0) {
if (endptr != NULL)
if (endptr)
*endptr = dash + 3;
return rc | LDD_F_SV_ALL;
}
index = simple_strtoul(dash, (char **)endptr, 16);
if (idx != NULL)
if (idx)
*idx = index;
/* Account for -mdc after index that is possible when specifying mdt */
if (endptr != NULL && strncmp(LUSTRE_MDC_NAME, *endptr + 1,
sizeof(LUSTRE_MDC_NAME)-1) == 0)
if (endptr && strncmp(LUSTRE_MDC_NAME, *endptr + 1,
sizeof(LUSTRE_MDC_NAME) - 1) == 0)
*endptr += sizeof(LUSTRE_MDC_NAME);
return rc;
......@@ -788,7 +785,7 @@ static int lmd_parse_mgssec(struct lustre_mount_data *lmd, char *ptr)
lmd->lmd_mgssec = NULL;
tail = strchr(ptr, ',');
if (tail == NULL)
if (!tail)
length = strlen(ptr);
else
length = tail - ptr;
......@@ -807,14 +804,14 @@ static int lmd_parse_string(char **handle, char *ptr)
char *tail;
int length;
if ((handle == NULL) || (ptr == NULL))
if (!handle || !ptr)
return -EINVAL;
kfree(*handle);
*handle = NULL;
tail = strchr(ptr, ',');
if (tail == NULL)
if (!tail)
length = strlen(ptr);
else
length = tail - ptr;
......@@ -847,14 +844,14 @@ static int lmd_parse_mgs(struct lustre_mount_data *lmd, char **ptr)
return -EINVAL;
}
if (lmd->lmd_mgs != NULL)
if (lmd->lmd_mgs)
oldlen = strlen(lmd->lmd_mgs) + 1;
mgsnid = kzalloc(oldlen + length + 1, GFP_NOFS);
if (!mgsnid)
return -ENOMEM;
if (lmd->lmd_mgs != NULL) {
if (lmd->lmd_mgs) {
/* Multiple mgsnid= are taken to mean failover locations */
memcpy(mgsnid, lmd->lmd_mgs, oldlen);
mgsnid[oldlen - 1] = ':';
......@@ -981,7 +978,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
size_t length, params_length;
char *tail = strchr(s1 + 6, ',');
if (tail == NULL)
if (!tail)
length = strlen(s1);
else
length = tail - s1;
......@@ -1011,7 +1008,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
/* Find next opt */
s2 = strchr(s1, ',');
if (s2 == NULL) {
if (!s2) {
if (clear)
*s1 = '\0';
break;
......@@ -1113,9 +1110,9 @@ static int lustre_fill_super(struct super_block *sb, void *data, int silent)
if (lmd_is_client(lmd)) {
CDEBUG(D_MOUNT, "Mounting client %s\n", lmd->lmd_profile);
if (client_fill_super == NULL)
if (!client_fill_super)
request_module("lustre");
if (client_fill_super == NULL) {
if (!client_fill_super) {
LCONSOLE_ERROR_MSG(0x165, "Nothing registered for client mount! Is the 'lustre' module loaded?\n");
lustre_put_lsi(sb);
rc = -ENODEV;
......