Commit a649ad1d authored by Greg Kroah-Hartman

staging: lustre: remove cfs_time_t typedef

Just use unsigned long everywhere, like the rest of the kernel does.

Cc: Andreas Dilger <andreas.dilger@intel.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: hpdd-discuss <hpdd-discuss@lists.01.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8718c63e
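
The typedef being dropped was only ever an alias for the kernel's jiffies counter, so the conversion is mechanical. A minimal sketch of the before/after pattern (the struct and field names here are illustrative, not taken from the patch):

/* Before: libcfs defined its own name for jiffies-based time. */
typedef unsigned long cfs_time_t;			/* jiffies */

struct foo_state_before {				/* hypothetical struct, for illustration only */
	cfs_time_t	foo_deadline;			/* opaque-looking, but just jiffies */
};

/* After: spell the type out, as the rest of the kernel does. */
struct foo_state_after {
	unsigned long	foo_deadline;			/* jiffies */
};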
......@@ -165,7 +165,7 @@ struct ptldebug_header {
#define CDEBUG_DEFAULT_MIN_DELAY ((cfs_time_seconds(1) + 1) / 2) /* jiffies */
#define CDEBUG_DEFAULT_BACKOFF 2
struct cfs_debug_limit_state {
cfs_time_t cdls_next;
unsigned long cdls_next;
unsigned int cdls_delay;
int cdls_count;
};
......
......@@ -50,10 +50,10 @@ void add_wait_queue_exclusive_head(wait_queue_head_t *, wait_queue_t *);
void cfs_init_timer(struct timer_list *t);
void cfs_timer_init(struct timer_list *t, cfs_timer_func_t *func, void *arg);
void cfs_timer_done(struct timer_list *t);
void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline);
void cfs_timer_arm(struct timer_list *t, unsigned long deadline);
void cfs_timer_disarm(struct timer_list *t);
int cfs_timer_is_armed(struct timer_list *t);
cfs_time_t cfs_timer_deadline(struct timer_list *t);
unsigned long cfs_timer_deadline(struct timer_list *t);
/*
* Memory
......
......@@ -43,27 +43,27 @@
* generic time manipulation functions.
*/
static inline cfs_time_t cfs_time_add(cfs_time_t t, cfs_duration_t d)
static inline unsigned long cfs_time_add(unsigned long t, cfs_duration_t d)
{
return (cfs_time_t)(t + d);
return (unsigned long)(t + d);
}
static inline cfs_duration_t cfs_time_sub(cfs_time_t t1, cfs_time_t t2)
static inline cfs_duration_t cfs_time_sub(unsigned long t1, unsigned long t2)
{
return (cfs_time_t)(t1 - t2);
return (unsigned long)(t1 - t2);
}
static inline int cfs_time_after(cfs_time_t t1, cfs_time_t t2)
static inline int cfs_time_after(unsigned long t1, unsigned long t2)
{
return cfs_time_before(t2, t1);
}
static inline int cfs_time_aftereq(cfs_time_t t1, cfs_time_t t2)
static inline int cfs_time_aftereq(unsigned long t1, unsigned long t2)
{
return cfs_time_beforeq(t2, t1);
}
static inline cfs_time_t cfs_time_shift(int seconds)
static inline unsigned long cfs_time_shift(int seconds)
{
return cfs_time_add(cfs_time_current(), cfs_time_seconds(seconds));
}
......@@ -81,7 +81,7 @@ static inline long cfs_timeval_sub(struct timeval *large, struct timeval *small,
return r;
}
static inline void cfs_slow_warning(cfs_time_t now, int seconds, char *msg)
static inline void cfs_slow_warning(unsigned long now, int seconds, char *msg)
{
if (cfs_time_after(cfs_time_current(),
cfs_time_add(now, cfs_time_seconds(15))))
......
......@@ -50,7 +50,7 @@
/*
* Platform provides three opaque data-types:
*
* cfs_time_t represents point in time. This is internal kernel
* unsigned long represents point in time. This is internal kernel
* time rather than "wall clock". This time bears no
* relation to gettimeofday().
*
......@@ -60,11 +60,11 @@
* struct timespec represents instance in world-visible time. This is
* used in file-system time-stamps
*
* cfs_time_t cfs_time_current(void);
* cfs_time_t cfs_time_add (cfs_time_t, cfs_duration_t);
* cfs_duration_t cfs_time_sub (cfs_time_t, cfs_time_t);
* int cfs_impl_time_before (cfs_time_t, cfs_time_t);
* int cfs_impl_time_before_eq(cfs_time_t, cfs_time_t);
* unsigned long cfs_time_current(void);
* unsigned long cfs_time_add (unsigned long, cfs_duration_t);
* cfs_duration_t cfs_time_sub (unsigned long, unsigned long);
* int cfs_impl_time_before (unsigned long, unsigned long);
* int cfs_impl_time_before_eq(unsigned long, unsigned long);
*
* cfs_duration_t cfs_duration_build(int64_t);
*
......@@ -107,20 +107,19 @@ static inline void cfs_fs_time_usec(struct timespec *t, struct timeval *v)
* Generic kernel stuff
*/
typedef unsigned long cfs_time_t; /* jiffies */
typedef long cfs_duration_t;
static inline int cfs_time_before(cfs_time_t t1, cfs_time_t t2)
static inline int cfs_time_before(unsigned long t1, unsigned long t2)
{
return time_before(t1, t2);
}
static inline int cfs_time_beforeq(cfs_time_t t1, cfs_time_t t2)
static inline int cfs_time_beforeq(unsigned long t1, unsigned long t2)
{
return time_before_eq(t1, t2);
}
static inline cfs_time_t cfs_time_current(void)
static inline unsigned long cfs_time_current(void)
{
return jiffies;
}
......
......@@ -88,8 +88,8 @@ struct upcall_cache_entry {
atomic_t ue_refcount;
int ue_flags;
wait_queue_head_t ue_waitq;
cfs_time_t ue_acquire_expire;
cfs_time_t ue_expire;
unsigned long ue_acquire_expire;
unsigned long ue_expire;
union {
struct md_identity identity;
} u;
......
......@@ -643,8 +643,8 @@ lnet_ni_t *lnet_nid2ni_locked(lnet_nid_t nid, int cpt);
lnet_ni_t *lnet_net2ni_locked(__u32 net, int cpt);
lnet_ni_t *lnet_net2ni(__u32 net);
int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, cfs_time_t when);
void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, cfs_time_t when);
int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, unsigned long when);
void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, unsigned long when);
int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway_nid,
unsigned int priority);
int lnet_check_routes(void);
......
......@@ -362,7 +362,7 @@ typedef struct lnet_lnd {
void (*lnd_notify)(struct lnet_ni *ni, lnet_nid_t peer, int alive);
/* query of peer aliveness */
void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer, cfs_time_t *when);
void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer, unsigned long *when);
/* accept a new connection */
int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock);
......@@ -457,11 +457,11 @@ typedef struct lnet_peer {
unsigned int lp_ping_notsent; /* SEND event outstanding from ping */
int lp_alive_count; /* # times router went dead<->alive */
long lp_txqnob; /* bytes queued for sending */
cfs_time_t lp_timestamp; /* time of last aliveness news */
cfs_time_t lp_ping_timestamp; /* time of last ping attempt */
cfs_time_t lp_ping_deadline; /* != 0 if ping reply expected */
cfs_time_t lp_last_alive; /* when I was last alive */
cfs_time_t lp_last_query; /* when lp_ni was queried last time */
unsigned long lp_timestamp; /* time of last aliveness news */
unsigned long lp_ping_timestamp; /* time of last ping attempt */
unsigned long lp_ping_deadline; /* != 0 if ping reply expected */
unsigned long lp_last_alive; /* when I was last alive */
unsigned long lp_last_query; /* when lp_ni was queried last time */
lnet_ni_t *lp_ni; /* interface peer is on */
lnet_nid_t lp_nid; /* peer's NID */
int lp_refcount; /* # refs */
......
......@@ -1074,10 +1074,10 @@ kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
}
void
kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
{
cfs_time_t last_alive = 0;
cfs_time_t now = cfs_time_current();
unsigned long last_alive = 0;
unsigned long now = cfs_time_current();
rwlock_t *glock = &kiblnd_data.kib_global_lock;
kib_peer_t *peer;
unsigned long flags;
......@@ -1509,7 +1509,7 @@ kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, kib_net_t *net,
}
static int
kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, cfs_time_t now)
kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
{
if (fpo->fpo_map_count != 0) /* still in use */
return 0;
......@@ -1524,7 +1524,7 @@ kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
LIST_HEAD (zombies);
kib_fmr_pool_t *fpo = fmr->fmr_pool;
kib_fmr_poolset_t *fps = fpo->fpo_owner;
cfs_time_t now = cfs_time_current();
unsigned long now = cfs_time_current();
kib_fmr_pool_t *tmp;
int rc;
......@@ -1731,7 +1731,7 @@ kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
}
static int
kiblnd_pool_is_idle(kib_pool_t *pool, cfs_time_t now)
kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
{
if (pool->po_allocated != 0) /* still in use */
return 0;
......@@ -1746,7 +1746,7 @@ kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
LIST_HEAD (zombies);
kib_poolset_t *ps = pool->po_owner;
kib_pool_t *tmp;
cfs_time_t now = cfs_time_current();
unsigned long now = cfs_time_current();
spin_lock(&ps->ps_lock);
......
......@@ -195,7 +195,7 @@ typedef struct
char ibd_ifname[KIB_IFNAME_SIZE];
int ibd_nnets; /* # nets extant */
cfs_time_t ibd_next_failover;
unsigned long ibd_next_failover;
int ibd_failed_failover; /* # failover failures */
unsigned int ibd_failover; /* failover in progress */
unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
......@@ -261,7 +261,7 @@ typedef struct kib_poolset
char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
struct list_head ps_pool_list; /* list of pools */
struct list_head ps_failed_pool_list; /* failed pool list */
cfs_time_t ps_next_retry; /* time stamp for retry if failed to allocate */
unsigned long ps_next_retry; /* time stamp for retry if failed to allocate */
int ps_increasing; /* is allocating new pool */
int ps_pool_size; /* new pool size */
int ps_cpt; /* CPT id */
......@@ -277,7 +277,7 @@ typedef struct kib_pool
struct list_head po_list; /* chain on pool list */
struct list_head po_free_list; /* pre-allocated node */
kib_poolset_t *po_owner; /* pool_set of this pool */
cfs_time_t po_deadline; /* deadline of this pool */
unsigned long po_deadline; /* deadline of this pool */
int po_allocated; /* # of elements in use */
int po_failed; /* pool is created on failed HCA */
int po_size; /* # of pre-allocated elements */
......@@ -317,7 +317,7 @@ typedef struct
/* is allocating new pool */
int fps_increasing;
/* time stamp for retry if failed to allocate */
cfs_time_t fps_next_retry;
unsigned long fps_next_retry;
} kib_fmr_poolset_t;
typedef struct
......@@ -326,7 +326,7 @@ typedef struct
struct kib_hca_dev *fpo_hdev; /* device for this pool */
kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
cfs_time_t fpo_deadline; /* deadline of this pool */
unsigned long fpo_deadline; /* deadline of this pool */
int fpo_failed; /* fmr pool is failed */
int fpo_map_count; /* # of mapped FMR */
} kib_fmr_pool_t;
......@@ -642,7 +642,7 @@ typedef struct kib_peer
int ibp_connecting; /* current active connection attempts */
int ibp_accepting; /* current passive connection attempts */
int ibp_error; /* errno on closing this peer */
cfs_time_t ibp_last_alive; /* when (in jiffies) I was last alive */
unsigned long ibp_last_alive; /* when (in jiffies) I was last alive */
} kib_peer_t;
extern kib_data_t kiblnd_data;
......@@ -990,7 +990,7 @@ void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);
int kiblnd_startup (lnet_ni_t *ni);
void kiblnd_shutdown (lnet_ni_t *ni);
int kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
int kiblnd_tunables_init(void);
void kiblnd_tunables_fini(void);
......
......@@ -1830,7 +1830,7 @@ static void
kiblnd_peer_notify (kib_peer_t *peer)
{
int error = 0;
cfs_time_t last_alive = 0;
unsigned long last_alive = 0;
unsigned long flags;
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
......
......@@ -1483,7 +1483,7 @@ void
ksocknal_peer_failed (ksock_peer_t *peer)
{
int notify = 0;
cfs_time_t last_alive = 0;
unsigned long last_alive = 0;
/* There has been a connection failure or comms error; but I'll only
* tell LNET I think the peer is dead if it's to another kernel and
......@@ -1620,7 +1620,7 @@ ksocknal_queue_zombie_conn (ksock_conn_t *conn)
void
ksocknal_destroy_conn (ksock_conn_t *conn)
{
cfs_time_t last_rcv;
unsigned long last_rcv;
/* Final coup-de-grace of the reaper */
CDEBUG (D_NET, "connection %p\n", conn);
......@@ -1789,11 +1789,11 @@ ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
}
void
ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
{
int connect = 1;
cfs_time_t last_alive = 0;
cfs_time_t now = cfs_time_current();
unsigned long last_alive = 0;
unsigned long now = cfs_time_current();
ksock_peer_t *peer = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
......
......@@ -164,7 +164,7 @@ typedef struct
struct list_head ksnd_zombie_conns; /* conns to free: reaper_lock */
struct list_head ksnd_enomem_conns; /* conns to retry: reaper_lock*/
wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
cfs_time_t ksnd_reaper_waketime;/* when reaper will wake */
unsigned long ksnd_reaper_waketime;/* when reaper will wake */
spinlock_t ksnd_reaper_lock; /* serialise */
int ksnd_enomem_tx; /* test ENOMEM sender */
......@@ -225,7 +225,7 @@ typedef struct /* transmit packet */
lnet_kiov_t *tx_kiov; /* packet page frags */
struct ksock_conn *tx_conn; /* owning conn */
lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() */
cfs_time_t tx_deadline; /* when (in jiffies) tx times out */
unsigned long tx_deadline; /* when (in jiffies) tx times out */
ksock_msg_t tx_msg; /* socklnd message buffer */
int tx_desc_size; /* size of this descriptor */
union {
......@@ -280,7 +280,7 @@ typedef struct ksock_conn
/* reader */
struct list_head ksnc_rx_list; /* where I enq waiting input or a forwarding descriptor */
cfs_time_t ksnc_rx_deadline; /* when (in jiffies) receive times out */
unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times out */
__u8 ksnc_rx_started; /* started receiving a message */
__u8 ksnc_rx_ready; /* data ready to read */
__u8 ksnc_rx_scheduled;/* being progressed */
......@@ -305,12 +305,12 @@ typedef struct ksock_conn
struct list_head ksnc_tx_list; /* where I enq waiting for output space */
struct list_head ksnc_tx_queue; /* packets waiting to be sent */
ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet message or ZC-ACK */
cfs_time_t ksnc_tx_deadline; /* when (in jiffies) tx times out */
unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out */
int ksnc_tx_bufnob; /* send buffer marker */
atomic_t ksnc_tx_nob; /* # bytes queued */
int ksnc_tx_ready; /* write space */
int ksnc_tx_scheduled; /* being progressed */
cfs_time_t ksnc_tx_last_post; /* time stamp of the last posted TX */
unsigned long ksnc_tx_last_post; /* time stamp of the last posted TX */
} ksock_conn_t;
typedef struct ksock_route
......@@ -319,7 +319,7 @@ typedef struct ksock_route
struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
struct ksock_peer *ksnr_peer; /* owning peer */
atomic_t ksnr_refcount; /* # users */
cfs_time_t ksnr_timeout; /* when (in jiffies) reconnection can happen next */
unsigned long ksnr_timeout; /* when (in jiffies) reconnection can happen next */
cfs_duration_t ksnr_retry_interval; /* how long between retries */
__u32 ksnr_myipaddr; /* my IP */
__u32 ksnr_ipaddr; /* IP address to connect to */
......@@ -337,7 +337,7 @@ typedef struct ksock_route
typedef struct ksock_peer
{
struct list_head ksnp_list; /* stash on global peer list */
cfs_time_t ksnp_last_alive; /* when (in jiffies) I was last alive */
unsigned long ksnp_last_alive; /* when (in jiffies) I was last alive */
lnet_process_id_t ksnp_id; /* who's on the other end(s) */
atomic_t ksnp_refcount; /* # users */
int ksnp_sharecount; /* lconf usage counter */
......@@ -352,7 +352,7 @@ typedef struct ksock_peer
struct list_head ksnp_tx_queue; /* waiting packets */
spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
struct list_head ksnp_zc_req_list; /* zero copy requests wait for ACK */
cfs_time_t ksnp_send_keepalive; /* time to send keepalive */
unsigned long ksnp_send_keepalive; /* time to send keepalive */
lnet_ni_t *ksnp_ni; /* which network */
int ksnp_n_passive_ips; /* # of... */
__u32 ksnp_passive_ips[LNET_MAX_INTERFACES]; /* preferred local interfaces */
......@@ -555,7 +555,7 @@ extern void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn);
extern void ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist,
int error);
extern void ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
extern void ksocknal_thread_fini (void);
extern void ksocknal_launch_all_connections_locked (ksock_peer_t *peer);
......
......@@ -780,7 +780,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
ksock_route_t *
ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
{
cfs_time_t now = cfs_time_current();
unsigned long now = cfs_time_current();
struct list_head *tmp;
ksock_route_t *route;
......@@ -1845,7 +1845,7 @@ ksocknal_connect (ksock_route_t *route)
int type;
int wanted;
struct socket *sock;
cfs_time_t deadline;
unsigned long deadline;
int retry_later = 0;
int rc = 0;
......@@ -2111,7 +2111,7 @@ static ksock_route_t *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
ksock_route_t *route;
cfs_time_t now;
unsigned long now;
now = cfs_time_current();
......@@ -2431,7 +2431,7 @@ ksocknal_check_peer_timeouts (int idx)
read_lock(&ksocknal_data.ksnd_global_lock);
list_for_each_entry(peer, peers, ksnp_list) {
cfs_time_t deadline = 0;
unsigned long deadline = 0;
int resid = 0;
int n = 0;
......@@ -2529,7 +2529,7 @@ ksocknal_reaper (void *arg)
cfs_duration_t timeout;
int i;
int peer_index = 0;
cfs_time_t deadline = cfs_time_current();
unsigned long deadline = cfs_time_current();
cfs_block_allsigs ();
......
......@@ -330,7 +330,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
int tms = *timeout_ms;
int wait;
wait_queue_t wl;
cfs_time_t now;
unsigned long now;
if (tms == 0)
return -1; /* don't want to wait and no new event */
......
......@@ -682,7 +682,7 @@ lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
void
lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
{
cfs_time_t last_alive = 0;
unsigned long last_alive = 0;
LASSERT(lnet_peer_aliveness_enabled(lp));
LASSERT(ni->ni_lnd->lnd_query != NULL);
......@@ -699,10 +699,10 @@ lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
/* NB: always called with lnet_net_lock held */
static inline int
lnet_peer_is_alive(lnet_peer_t *lp, cfs_time_t now)
lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
{
int alive;
cfs_time_t deadline;
unsigned long deadline;
LASSERT(lnet_peer_aliveness_enabled(lp));
......@@ -734,7 +734,7 @@ lnet_peer_is_alive(lnet_peer_t *lp, cfs_time_t now)
int
lnet_peer_alive_locked(lnet_peer_t *lp)
{
cfs_time_t now = cfs_time_current();
unsigned long now = cfs_time_current();
if (!lnet_peer_aliveness_enabled(lp))
return -ENODEV;
......@@ -747,7 +747,7 @@ lnet_peer_alive_locked(lnet_peer_t *lp)
if (lp->lp_last_query != 0) {
static const int lnet_queryinterval = 1;
cfs_time_t next_query =
unsigned long next_query =
cfs_time_add(lp->lp_last_query,
cfs_time_seconds(lnet_queryinterval));
......
......@@ -107,7 +107,7 @@ lnet_peers_start_down(void)
}
void
lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, cfs_time_t when)
lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, unsigned long when)
{
if (cfs_time_before(when, lp->lp_timestamp)) { /* out of date information */
CDEBUG(D_NET, "Out of date\n");
......@@ -931,7 +931,7 @@ static void
lnet_ping_router_locked (lnet_peer_t *rtr)
{
lnet_rc_data_t *rcd = NULL;
cfs_time_t now = cfs_time_current();
unsigned long now = cfs_time_current();
int secs;
lnet_peer_addref_locked(rtr);
......@@ -1497,10 +1497,10 @@ lnet_rtrpools_alloc(int im_a_router)
}
int
lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
{
struct lnet_peer *lp = NULL;
cfs_time_t now = cfs_time_current();
unsigned long now = cfs_time_current();
int cpt = lnet_cpt_of_nid(nid);
LASSERT (!in_interrupt ());
......@@ -1576,7 +1576,7 @@ lnet_get_tunables (void)
#else
int
lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
{
return -EOPNOTSUPP;
}
......
......@@ -333,8 +333,8 @@ int LL_PROC_PROTO(proc_lnet_routers)
if (peer != NULL) {
lnet_nid_t nid = peer->lp_nid;
cfs_time_t now = cfs_time_current();
cfs_time_t deadline = peer->lp_ping_deadline;
unsigned long now = cfs_time_current();
unsigned long deadline = peer->lp_ping_deadline;
int nrefs = peer->lp_refcount;
int nrtrrefs = peer->lp_rtr_refcount;
int alive_cnt = peer->lp_alive_count;
......@@ -511,7 +511,7 @@ int LL_PROC_PROTO(proc_lnet_peers)
aliveness = peer->lp_alive ? "up" : "down";
if (lnet_peer_aliveness_enabled(peer)) {
cfs_time_t now = cfs_time_current();
unsigned long now = cfs_time_current();
cfs_duration_t delta;
delta = cfs_time_sub(now, peer->lp_last_alive);
......
......@@ -504,7 +504,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
nd = crpc->crp_node;
dur = (cfs_duration_t)cfs_time_sub(crpc->crp_stamp,
(cfs_time_t)console_session.ses_id.ses_stamp);
(unsigned long)console_session.ses_id.ses_stamp);
cfs_duration_usec(dur, &tv);
if (copy_to_user(&ent->rpe_peer,
......@@ -1274,7 +1274,7 @@ lstcon_rpc_pinger(void *arg)
CDEBUG(D_NET, "Ping %d nodes in session\n", count);
ptimer->stt_expires = (cfs_time_t)(cfs_time_current_sec() + LST_PING_INTERVAL);
ptimer->stt_expires = (unsigned long)(cfs_time_current_sec() + LST_PING_INTERVAL);
stt_add_timer(ptimer);
mutex_unlock(&console_session.ses_mutex);
......@@ -1297,7 +1297,7 @@ lstcon_rpc_pinger_start(void)
}
ptimer = &console_session.ses_ping_timer;
ptimer->stt_expires = (cfs_time_t)(cfs_time_current_sec() + LST_PING_INTERVAL);
ptimer->stt_expires = (unsigned long)(cfs_time_current_sec() + LST_PING_INTERVAL);
stt_add_timer(ptimer);
......
......@@ -75,7 +75,7 @@ typedef struct lstcon_rpc {
/** RPC is embedded in other structure and can't free it */
unsigned int crp_embedded:1;
int crp_status; /* console rpc errors */
cfs_time_t crp_stamp; /* replied time stamp */
unsigned long crp_stamp; /* replied time stamp */
} lstcon_rpc_t;
typedef struct lstcon_rpc_trans {
......
......@@ -56,7 +56,7 @@ typedef struct lstcon_node {
int nd_ref; /* reference count */
int nd_state; /* state of the node */
int nd_timeout; /* session timeout */
cfs_time_t nd_stamp; /* timestamp of last replied RPC */
unsigned long nd_stamp; /* timestamp of last replied RPC */
struct lstcon_rpc nd_ping; /* ping rpc */
} lstcon_node_t; /*** node descriptor */
......
......@@ -334,7 +334,7 @@ typedef struct {
atomic_t sn_refcount;
atomic_t sn_brw_errors;
atomic_t sn_ping_errors;
cfs_time_t sn_started;
unsigned long sn_started;
} sfw_session_t;
#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
......
......@@ -60,7 +60,7 @@
struct st_timer_data {
spinlock_t stt_lock;
/* start time of the slot processed previously */
cfs_time_t stt_prev_slot;
unsigned long stt_prev_slot;
struct list_head stt_hash[STTIMER_NSLOTS];
int stt_shuttingdown;
wait_queue_head_t stt_waitq;
......@@ -122,7 +122,7 @@ stt_del_timer(stt_timer_t *timer)
/* called with stt_data.stt_lock held */
int
stt_expire_list(struct list_head *slot, cfs_time_t now)
stt_expire_list(struct list_head *slot, unsigned long now)
{
int expired = 0;
stt_timer_t *timer;
......@@ -146,11 +146,11 @@ stt_expire_list(struct list_head *slot, cfs_time_t now)
}
int
stt_check_timers(cfs_time_t *last)
stt_check_timers(unsigned long *last)
{
int expired = 0;
cfs_time_t now;
cfs_time_t this_slot;
unsigned long now;
unsigned long this_slot;
now = cfs_time_current_sec();
this_slot = now & STTIMER_SLOTTIMEMASK;
......
......@@ -40,7 +40,7 @@
typedef struct {
struct list_head stt_list;
cfs_time_t stt_expires;
unsigned long stt_expires;
void (*stt_func) (void *);
void *stt_data;
} stt_timer_t;
......
......@@ -82,7 +82,7 @@ struct obd_capa {
struct lustre_capa c_capa; /* capa */
atomic_t c_refc; /* ref count */
cfs_time_t c_expiry; /* jiffies */
unsigned long c_expiry; /* jiffies */
spinlock_t c_lock; /* protect capa content */
int c_site;
......@@ -266,7 +266,7 @@ static inline __u64 capa_open_opc(int mode)
static inline void set_capa_expiry(struct obd_capa *ocapa)
{
cfs_time_t expiry = cfs_time_sub((cfs_time_t)ocapa->c_capa.lc_expiry,
unsigned long expiry = cfs_time_sub((unsigned long)ocapa->c_capa.lc_expiry,
cfs_time_current_sec());
ocapa->c_expiry = cfs_time_add(cfs_time_current(),
cfs_time_seconds(expiry));
......
......@@ -441,7 +441,7 @@ struct ldlm_namespace {
* \see ldlm_namespace_dump. Increased by 10 seconds every time
* it is called.
*/
cfs_time_t ns_next_dump;
unsigned long ns_next_dump;
/** "policy" function that does actual lock conflict determination */
ldlm_res_policy ns_policy;
......@@ -783,13 +783,13 @@ struct ldlm_lock {
* Seconds. It will be updated if there is any activity related to
* the lock, e.g. enqueue the lock or send blocking AST.
*/
cfs_time_t l_last_activity;
unsigned long l_last_activity;
/**
* Time last used by e.g. being matched by lock match.
* Jiffies. Should be converted to time if needed.
*/
cfs_time_t l_last_used;
unsigned long l_last_used;
/** Originally requested extent for the extent lock. */
struct ldlm_extent l_req_extent;
......@@ -837,7 +837,7 @@ struct ldlm_lock {
* under this lock.
* \see ost_rw_prolong_locks
*/
cfs_time_t l_callback_timeout;
unsigned long l_callback_timeout;
/** Local PID of process which created this lock. */
__u32 l_pid;
......@@ -951,7 +951,7 @@ struct ldlm_resource {
void *lr_lvb_data;
/** When the resource was considered as contended. */
cfs_time_t lr_contention_time;
unsigned long lr_contention_time;
/** List of references to this resource. For debugging. */
struct lu_ref lr_reference;
......
......@@ -209,7 +209,7 @@ struct obd_export {
/** Last committed transno for this export */
__u64 exp_last_committed;
/** When was last request received */
cfs_time_t exp_last_request_time;
unsigned long exp_last_request_time;
/** On replay all requests waiting for replay are linked here */
struct list_head exp_req_replay_queue;
/**
......@@ -245,7 +245,7 @@ struct obd_export {
enum lustre_sec_part exp_sp_peer;
struct sptlrpc_flavor exp_flvr; /* current */
struct sptlrpc_flavor exp_flvr_old[2]; /* about-to-expire */
cfs_time_t exp_flvr_expire[2]; /* seconds */
unsigned long exp_flvr_expire[2]; /* seconds */
/** protects exp_hp_rpcs */
spinlock_t exp_rpc_lock;
......
......@@ -200,7 +200,7 @@ struct obd_import {
*/
struct ptlrpc_sec *imp_sec;
struct mutex imp_sec_mutex;
cfs_time_t imp_sec_expire;
unsigned long imp_sec_expire;
/** @} */
/** Wait queue for those who need to wait for recovery completion */
......@@ -247,7 +247,7 @@ struct obd_import {
*/
struct lustre_handle imp_remote_handle;
/** When to perform next ping. time in jiffies. */
cfs_time_t imp_next_ping;
unsigned long imp_next_ping;
/** When we last successfully connected. time in 64bit jiffies */
__u64 imp_last_success_conn;
......
......@@ -1715,7 +1715,7 @@ struct ptlrpc_request {
/** optional time limit for send attempts */
cfs_duration_t rq_delay_limit;
/** time request was first queued */
cfs_time_t rq_queued_time;
unsigned long rq_queued_time;
/* server-side... */
/** request arrival time */
......@@ -2407,7 +2407,7 @@ struct ptlrpc_service_part {
/** early reply timer */
struct timer_list scp_at_timer;
/** debug */
cfs_time_t scp_at_checktime;
unsigned long scp_at_checktime;
/** check early replies */
unsigned scp_at_check;
/** @} */
......
......@@ -510,7 +510,7 @@ struct ptlrpc_cli_ctx {
atomic_t cc_refcount;
struct ptlrpc_sec *cc_sec;
struct ptlrpc_ctx_ops *cc_ops;
cfs_time_t cc_expire; /* in seconds */
unsigned long cc_expire; /* in seconds */
unsigned int cc_early_expire:1;
unsigned long cc_flags;
struct vfs_cred cc_vcred;
......@@ -835,8 +835,8 @@ struct ptlrpc_sec {
* garbage collection
*/
struct list_head ps_gc_list;
cfs_time_t ps_gc_interval; /* in seconds */
cfs_time_t ps_gc_next; /* in seconds */
unsigned long ps_gc_interval; /* in seconds */
unsigned long ps_gc_next; /* in seconds */
};
static inline int sec_is_reverse(struct ptlrpc_sec *sec)
......
......@@ -281,7 +281,7 @@ enum llog_ctxt_id {
struct timeout_item {
enum timeout_event ti_event;
cfs_time_t ti_timeout;
unsigned long ti_timeout;
timeout_cb_t ti_cb;
void *ti_cb_data;
struct list_head ti_obd_list;
......@@ -337,7 +337,7 @@ struct client_obd {
* See osc_{reserve|unreserve}_grant for details. */
long cl_reserved_grant;
struct list_head cl_cache_waiters; /* waiting for cache/grant */
cfs_time_t cl_next_shrink_grant; /* jiffies */
unsigned long cl_next_shrink_grant; /* jiffies */
struct list_head cl_grant_shrink_list; /* Timeout event list */
int cl_grant_shrink_interval; /* seconds */
......
......@@ -69,7 +69,7 @@ struct ldlm_cb_async_args {
static struct ldlm_state *ldlm_state;
inline cfs_time_t round_timeout(cfs_time_t timeout)
inline unsigned long round_timeout(unsigned long timeout)
{
return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
}
......
......@@ -95,7 +95,7 @@ int ldlm_expired_completion_wait(void *data)
struct obd_device *obd;
if (lock->l_conn_export == NULL) {
static cfs_time_t next_dump = 0, last_dump = 0;
static unsigned long next_dump = 0, last_dump = 0;
LCONSOLE_WARN("lock timed out (enqueued at "CFS_TIME_T", "
CFS_DURATION_T"s ago)\n",
......@@ -1447,10 +1447,10 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
int unused, int added,
int count)
{
cfs_time_t cur = cfs_time_current();
unsigned long cur = cfs_time_current();
struct ldlm_pool *pl = &ns->ns_pool;
__u64 slv, lvf, lv;
cfs_time_t la;
unsigned long la;
/* Stop LRU processing when we reach past @count or have checked all
* locks in LRU. */
......
......@@ -90,7 +90,7 @@ void cfs_timer_done(struct timer_list *t)
}
EXPORT_SYMBOL(cfs_timer_done);
void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline)
void cfs_timer_arm(struct timer_list *t, unsigned long deadline)
{
mod_timer(t, deadline);
}
......@@ -108,7 +108,7 @@ int cfs_timer_is_armed(struct timer_list *t)
}
EXPORT_SYMBOL(cfs_timer_is_armed);
cfs_time_t cfs_timer_deadline(struct timer_list *t)
unsigned long cfs_timer_deadline(struct timer_list *t)
{
return t->expires;
}
......
......@@ -70,7 +70,7 @@ static unsigned long long ll_capa_renewal_retries;
static int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa);
static inline void update_capa_timer(struct obd_capa *ocapa, cfs_time_t expiry)
static inline void update_capa_timer(struct obd_capa *ocapa, unsigned long expiry)
{
if (cfs_time_before(expiry, ll_capa_timer.expires) ||
!timer_pending(&ll_capa_timer)) {
......@@ -80,7 +80,7 @@ static inline void update_capa_timer(struct obd_capa *ocapa, cfs_time_t expiry)
}
}
static inline cfs_time_t capa_renewal_time(struct obd_capa *ocapa)
static inline unsigned long capa_renewal_time(struct obd_capa *ocapa)
{
return cfs_time_sub(ocapa->c_expiry,
cfs_time_seconds(ocapa->c_capa.lc_timeout) / 2);
......@@ -511,7 +511,7 @@ struct obd_capa *ll_add_capa(struct inode *inode, struct obd_capa *ocapa)
return ocapa;
}
static inline void delay_capa_renew(struct obd_capa *oc, cfs_time_t delay)
static inline void delay_capa_renew(struct obd_capa *oc, unsigned long delay)
{
/* NB: set a fake expiry for this capa to prevent it renew too soon */
oc->c_expiry = cfs_time_add(oc->c_expiry, cfs_time_seconds(delay));
......
......@@ -145,7 +145,7 @@ struct ll_inode_info {
* capability needs renewal */
atomic_t lli_open_count;
struct obd_capa *lli_mds_capa;
cfs_time_t lli_rmtperm_time;
unsigned long lli_rmtperm_time;
/* handle is to be sent to MDS later on done_writing and setattr.
* Open handle data are needed for the recovery to reconstruct
......@@ -213,7 +213,7 @@ struct ll_inode_info {
struct mutex f_write_mutex;
struct rw_semaphore f_glimpse_sem;
cfs_time_t f_glimpse_time;
unsigned long f_glimpse_time;
struct list_head f_agl_list;
__u64 f_agl_index;
......
......@@ -249,7 +249,7 @@ int lustre_check_remote_perm(struct inode *inode, int mask)
struct ptlrpc_request *req = NULL;
struct mdt_remote_perm *perm;
struct obd_capa *oc;
cfs_time_t save;
unsigned long save;
int i = 0, rc;
do {
......
......@@ -118,7 +118,7 @@ struct osc_object {
* True if locking against this stripe got -EUSERS.
*/
int oo_contended;
cfs_time_t oo_contention_time;
unsigned long oo_contention_time;
/**
* List of pages in transfer.
*/
......@@ -387,7 +387,7 @@ struct osc_page {
/**
* Submit time - the time when the page is starting RPC. For debugging.
*/
cfs_time_t ops_submit_time;
unsigned long ops_submit_time;
/**
* A lock of which we hold a reference covers this page. Only used by
......
......@@ -213,8 +213,8 @@ int osc_object_is_contended(struct osc_object *obj)
{
struct osc_device *dev = lu2osc_dev(obj->oo_cl.co_lu.lo_dev);
int osc_contention_time = dev->od_contention_time;
cfs_time_t cur_time = cfs_time_current();
cfs_time_t retry_time;
unsigned long cur_time = cfs_time_current();
unsigned long retry_time;
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_OBJECT_CONTENTION))
return 1;
......
......@@ -352,7 +352,7 @@ static const char *osc_list(struct list_head *head)
return list_empty(head) ? "-" : "+";
}
static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
static inline unsigned long osc_submit_duration(struct osc_page *opg)
{
if (opg->ops_submit_time == 0)
return 0;
......
......@@ -966,8 +966,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
static int osc_should_shrink_grant(struct client_obd *client)
{
cfs_time_t time = cfs_time_current();
cfs_time_t next_shrink = client->cl_next_shrink_grant;
unsigned long time = cfs_time_current();
unsigned long next_shrink = client->cl_next_shrink_grant;
if ((client->cl_import->imp_connect_data.ocd_connect_flags &
OBD_CONNECT_GRANT_SHRINK) == 0)
......
......@@ -141,10 +141,10 @@ static inline int ptlrpc_next_reconnect(struct obd_import *imp)
return cfs_time_shift(obd_timeout);
}
cfs_duration_t pinger_check_timeout(cfs_time_t time)
cfs_duration_t pinger_check_timeout(unsigned long time)
{
struct timeout_item *item;
cfs_time_t timeout = PING_INTERVAL;
unsigned long timeout = PING_INTERVAL;
/* The timeout list is a increase order sorted list */
mutex_lock(&pinger_mutex);
......@@ -244,7 +244,7 @@ static int ptlrpc_pinger_main(void *arg)
/* And now, loop forever, pinging as needed. */
while (1) {
cfs_time_t this_ping = cfs_time_current();
unsigned long this_ping = cfs_time_current();
struct l_wait_info lwi;
cfs_duration_t time_to_next_wake;
struct timeout_item *item;
......
......@@ -113,7 +113,7 @@ static struct ptlrpc_enc_page_pool {
unsigned long epp_st_missings; /* # of cache missing */
unsigned long epp_st_lowfree; /* lowest free pages reached */
unsigned int epp_st_max_wqlen; /* highest waitqueue length */
cfs_time_t epp_st_max_wait; /* in jiffies */
unsigned long epp_st_max_wait; /* in jiffies */
/*
* pointers to pools
*/
......@@ -498,7 +498,7 @@ int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
wait_queue_t waitlink;
unsigned long this_idle = -1;
cfs_time_t tick = 0;
unsigned long tick = 0;
long now;
int p_idx, g_idx;
int i;
......
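Taken together, the hunks above show that the surviving cfs_time_* helpers are thin wrappers over the standard jiffies API (cfs_time_current() returns jiffies, cfs_time_before() calls time_before(), and so on). A hedged sketch of how converted call sites relate to plain jiffies arithmetic; example_timed_out() is an illustrative function, not part of the patch:

#include <linux/jiffies.h>

/*
 * Equivalences implied by the diff:
 *   cfs_time_current()       -> jiffies
 *   cfs_time_add(t, d)       -> t + d
 *   cfs_time_sub(t1, t2)     -> t1 - t2
 *   cfs_time_before(t1, t2)  -> time_before(t1, t2)
 *   cfs_time_beforeq(t1, t2) -> time_before_eq(t1, t2)
 */
static int example_timed_out(unsigned long deadline)
{
	/* deadline holds a jiffies value, like the converted fields above */
	return time_after(jiffies, deadline);
}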