Commit 5986a2ec authored by Linus Torvalds

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/mfasheh/ocfs2

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/mfasheh/ocfs2: (22 commits)
  configfs: Zero terminate data in configfs attribute writes.
  [PATCH] ocfs2 heartbeat: clean up bio submission code
  ocfs2: introduce sc->sc_send_lock to protect outbound messages
  [PATCH] ocfs2: drop INET from Kconfig, not needed
  ocfs2_dlm: Add timeout to dlm join domain
  ocfs2_dlm: Silence some messages during join domain
  ocfs2_dlm: disallow a domain join if node maps mismatch
  ocfs2_dlm: Ensure correct ordering of set/clear refmap bit on lockres
  ocfs2: Binds listener to the configured ip address
  ocfs2_dlm: Calling post handler function in assert master handler
  ocfs2: Added post handler callable function in o2net message handler
  ocfs2_dlm: Cookies in locks not being printed correctly in error messages
  ocfs2_dlm: Silence a failed convert
  ocfs2_dlm: wake up sleepers on the lockres waitqueue
  ocfs2_dlm: Dlm dispatch was stopping too early
  ocfs2_dlm: Drop inflight refmap even if no locks found on the lockres
  ocfs2_dlm: Flush dlm workqueue before starting to migrate
  ocfs2_dlm: Fix migrate lockres handler queue scanning
  ocfs2_dlm: Make dlmunlock() wait for migration to complete
  ocfs2_dlm: Fixes race between migrate and dirty
  ...
parents 43187902 ff05d1c4
@@ -426,7 +426,6 @@ config OCFS2_FS
 	select CONFIGFS_FS
 	select JBD
 	select CRC32
-	select INET
 	help
 	  OCFS2 is a general purpose extent based shared disk cluster file
 	  system with many similarities to ext3. It supports 64 bit inode
...
@@ -162,14 +162,17 @@ fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size
 	int error;

 	if (!buffer->page)
-		buffer->page = (char *)get_zeroed_page(GFP_KERNEL);
+		buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0);
 	if (!buffer->page)
 		return -ENOMEM;

-	if (count > PAGE_SIZE)
-		count = PAGE_SIZE;
+	if (count >= PAGE_SIZE)
+		count = PAGE_SIZE - 1;
 	error = copy_from_user(buffer->page,buf,count);
 	buffer->needs_read_fill = 1;
+	/* if buf is assumed to contain a string, terminate it by \0,
+	 * so e.g. sscanf() can scan the string easily */
+	buffer->page[count] = 0;
 	return error ? -EFAULT : count;
 }
...
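Why the terminator matters: configfs hands buffer->page to each attribute's ->store() method, and store handlers routinely parse it with sscanf() or simple_strtoul(), both of which expect a NUL-terminated string. A minimal sketch of such a consumer, assuming a hypothetical attribute that parses one unsigned long (the item accessor is invented, not part of this patch):

static ssize_t my_attr_store(struct config_item *item,
                             const char *page, size_t count)
{
        unsigned long val;

        /* safe only because fill_write_buffer() above guarantees that
         * page is '\0'-terminated */
        if (sscanf(page, "%lu", &val) != 1)
                return -EINVAL;
        to_my_item(item)->value = val;  /* hypothetical container */
        return count;
}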
@@ -184,10 +184,9 @@ static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
 	flush_scheduled_work();
 }

-static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc,
-				      unsigned int num_ios)
+static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
 {
-	atomic_set(&wc->wc_num_reqs, num_ios);
+	atomic_set(&wc->wc_num_reqs, 1);
 	init_completion(&wc->wc_io_complete);
 	wc->wc_error = 0;
 }
@@ -212,6 +211,7 @@ static void o2hb_wait_on_io(struct o2hb_region *reg,
 	struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;

 	blk_run_address_space(mapping);
+	o2hb_bio_wait_dec(wc, 1);

 	wait_for_completion(&wc->wc_io_complete);
 }
@@ -231,6 +231,7 @@ static int o2hb_bio_end_io(struct bio *bio,
 		return 1;

 	o2hb_bio_wait_dec(wc, 1);
+	bio_put(bio);
 	return 0;
 }
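The rework above turns the wait context into a refcount-style completion: it is initialized to 1 (a bias held by the submitter), each submitted bio takes one reference, and o2hb_wait_on_io() drops the bias before sleeping, so completion can only fire after every bio's end_io has run. A self-contained userspace analogue of that pattern (illustrative only, not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int num_reqs;                 /* like wc->wc_num_reqs */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;

static void put_req(void)                   /* like o2hb_bio_wait_dec(wc, 1) */
{
        if (atomic_fetch_sub(&num_reqs, 1) == 1) {
                pthread_mutex_lock(&lock);
                pthread_cond_signal(&done); /* last reference: complete */
                pthread_mutex_unlock(&lock);
        }
}

static void *io_done(void *arg)             /* like o2hb_bio_end_io() */
{
        put_req();
        return NULL;
}

int main(void)
{
        pthread_t t[4];
        int i;

        atomic_init(&num_reqs, 1);          /* the bias: o2hb_bio_wait_init() */
        for (i = 0; i < 4; i++) {
                atomic_fetch_add(&num_reqs, 1); /* one ref per submitted bio */
                pthread_create(&t[i], NULL, io_done, NULL);
        }

        put_req();                          /* drop the bias: o2hb_wait_on_io() */
        pthread_mutex_lock(&lock);
        while (atomic_load(&num_reqs) > 0)
                pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);

        for (i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        puts("all I/O complete");
        return 0;
}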
@@ -238,23 +239,22 @@ static int o2hb_bio_end_io(struct bio *bio,
  * start_slot. */
 static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
 				      struct o2hb_bio_wait_ctxt *wc,
-				      unsigned int start_slot,
-				      unsigned int num_slots)
+				      unsigned int *current_slot,
+				      unsigned int max_slots)
 {
-	int i, nr_vecs, len, first_page, last_page;
+	int len, current_page;
 	unsigned int vec_len, vec_start;
 	unsigned int bits = reg->hr_block_bits;
 	unsigned int spp = reg->hr_slots_per_page;
+	unsigned int cs = *current_slot;
 	struct bio *bio;
 	struct page *page;

-	nr_vecs = (num_slots + spp - 1) / spp;
-
 	/* Testing has shown this allocation to take long enough under
 	 * GFP_KERNEL that the local node can get fenced. It would be
 	 * nicest if we could pre-allocate these bios and avoid this
 	 * all together. */
-	bio = bio_alloc(GFP_ATOMIC, nr_vecs);
+	bio = bio_alloc(GFP_ATOMIC, 16);
 	if (!bio) {
 		mlog(ML_ERROR, "Could not alloc slots BIO!\n");
 		bio = ERR_PTR(-ENOMEM);
@@ -262,137 +262,53 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
 	}

 	/* Must put everything in 512 byte sectors for the bio... */
-	bio->bi_sector = (reg->hr_start_block + start_slot) << (bits - 9);
+	bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9);
 	bio->bi_bdev = reg->hr_bdev;
 	bio->bi_private = wc;
 	bio->bi_end_io = o2hb_bio_end_io;

-	first_page = start_slot / spp;
-	last_page = first_page + nr_vecs;
-	vec_start = (start_slot << bits) % PAGE_CACHE_SIZE;
-	for(i = first_page; i < last_page; i++) {
-		page = reg->hr_slot_data[i];
+	vec_start = (cs << bits) % PAGE_CACHE_SIZE;
+	while(cs < max_slots) {
+		current_page = cs / spp;
+		page = reg->hr_slot_data[current_page];

-		vec_len = PAGE_CACHE_SIZE;
-		/* last page might be short */
-		if (((i + 1) * spp) > (start_slot + num_slots))
-			vec_len = ((num_slots + start_slot) % spp) << bits;
-		vec_len -= vec_start;
+		vec_len = min(PAGE_CACHE_SIZE,
+			      (max_slots-cs) * (PAGE_CACHE_SIZE/spp) );

 		mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
-		     i, vec_len, vec_start);
+		     current_page, vec_len, vec_start);

 		len = bio_add_page(bio, page, vec_len, vec_start);
-		if (len != vec_len) {
-			bio_put(bio);
-			bio = ERR_PTR(-EIO);
-
-			mlog(ML_ERROR, "Error adding page to bio i = %d, "
-			     "vec_len = %u, len = %d\n, start = %u\n",
-			     i, vec_len, len, vec_start);
-			goto bail;
-		}
+		if (len != vec_len) break;

+		cs += vec_len / (PAGE_CACHE_SIZE/spp);
 		vec_start = 0;
 	}

 bail:
+	*current_slot = cs;
 	return bio;
 }
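For reference, the slot-advance arithmetic of the new loop, worked through once in a self-contained userspace program (assumes 4096-byte pages and 512-byte heartbeat blocks, so spp = 8; the values are illustrative):

#include <stdio.h>

int main(void)
{
        const unsigned int page_size = 4096;          /* PAGE_CACHE_SIZE */
        const unsigned int bits = 9;                  /* 512-byte hb blocks */
        const unsigned int spp = page_size >> bits;   /* 8 slots per page */
        unsigned int cs = 0, max_slots = 30;
        unsigned int vec_start = (cs << bits) % page_size;

        while (cs < max_slots) {
                unsigned int current_page = cs / spp;
                unsigned int vec_len = page_size;

                /* same min() as in o2hb_setup_one_bio() above */
                if (vec_len > (max_slots - cs) * (page_size / spp))
                        vec_len = (max_slots - cs) * (page_size / spp);

                printf("slot %2u -> page %u, vec_start=%u, vec_len=%u\n",
                       cs, current_page, vec_start, vec_len);

                cs += vec_len / (page_size / spp); /* slots this vec covered */
                vec_start = 0;
        }
        return 0;
}

It prints one vector per page (slots 0, 8, 16, 24), with the last vector shortened to 3072 bytes for the trailing 6 slots.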
-/*
- * Compute the maximum number of sectors the bdev can handle in one bio,
- * as a power of two.
- *
- * Stolen from oracleasm, thanks Joel!
- */
-static int compute_max_sectors(struct block_device *bdev)
-{
-	int max_pages, max_sectors, pow_two_sectors;
-	struct request_queue *q;
-
-	q = bdev_get_queue(bdev);
-	max_pages = q->max_sectors >> (PAGE_SHIFT - 9);
-	if (max_pages > BIO_MAX_PAGES)
-		max_pages = BIO_MAX_PAGES;
-	if (max_pages > q->max_phys_segments)
-		max_pages = q->max_phys_segments;
-	if (max_pages > q->max_hw_segments)
-		max_pages = q->max_hw_segments;
-	max_pages--; /* Handle I/Os that straddle a page */
-
-	if (max_pages) {
-		max_sectors = max_pages << (PAGE_SHIFT - 9);
-	} else {
-		/* If BIO contains 1 or less than 1 page. */
-		max_sectors = q->max_sectors;
-	}
-	/* Why is fls() 1-based???? */
-	pow_two_sectors = 1 << (fls(max_sectors) - 1);
-
-	return pow_two_sectors;
-}
-
-static inline void o2hb_compute_request_limits(struct o2hb_region *reg,
-					       unsigned int num_slots,
-					       unsigned int *num_bios,
-					       unsigned int *slots_per_bio)
-{
-	unsigned int max_sectors, io_sectors;
-
-	max_sectors = compute_max_sectors(reg->hr_bdev);
-
-	io_sectors = num_slots << (reg->hr_block_bits - 9);
-
-	*num_bios = (io_sectors + max_sectors - 1) / max_sectors;
-	*slots_per_bio = max_sectors >> (reg->hr_block_bits - 9);
-
-	mlog(ML_HB_BIO, "My io size is %u sectors for %u slots. This "
-	     "device can handle %u sectors of I/O\n", io_sectors, num_slots,
-	     max_sectors);
-	mlog(ML_HB_BIO, "Will need %u bios holding %u slots each\n",
-	     *num_bios, *slots_per_bio);
-}
 static int o2hb_read_slots(struct o2hb_region *reg,
 			   unsigned int max_slots)
 {
-	unsigned int num_bios, slots_per_bio, start_slot, num_slots;
-	int i, status;
+	unsigned int current_slot=0;
+	int status;
 	struct o2hb_bio_wait_ctxt wc;
-	struct bio **bios;
 	struct bio *bio;

-	o2hb_compute_request_limits(reg, max_slots, &num_bios, &slots_per_bio);
+	o2hb_bio_wait_init(&wc);

-	bios = kcalloc(num_bios, sizeof(struct bio *), GFP_KERNEL);
-	if (!bios) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		return status;
-	}
-
-	o2hb_bio_wait_init(&wc, num_bios);
-
-	num_slots = slots_per_bio;
-	for(i = 0; i < num_bios; i++) {
-		start_slot = i * slots_per_bio;
-
-		/* adjust num_slots at last bio */
-		if (max_slots < (start_slot + num_slots))
-			num_slots = max_slots - start_slot;
-
-		bio = o2hb_setup_one_bio(reg, &wc, start_slot, num_slots);
+	while(current_slot < max_slots) {
+		bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots);
 		if (IS_ERR(bio)) {
-			o2hb_bio_wait_dec(&wc, num_bios - i);
-
 			status = PTR_ERR(bio);
 			mlog_errno(status);
 			goto bail_and_wait;
 		}
-		bios[i] = bio;

+		atomic_inc(&wc.wc_num_reqs);
 		submit_bio(READ, bio);
 	}

@@ -403,38 +319,30 @@ static int o2hb_read_slots(struct o2hb_region *reg,
 	if (wc.wc_error && !status)
 		status = wc.wc_error;

-	if (bios) {
-		for(i = 0; i < num_bios; i++)
-			if (bios[i])
-				bio_put(bios[i]);
-		kfree(bios);
-	}
-
 	return status;
 }

 static int o2hb_issue_node_write(struct o2hb_region *reg,
-				 struct bio **write_bio,
 				 struct o2hb_bio_wait_ctxt *write_wc)
 {
 	int status;
 	unsigned int slot;
 	struct bio *bio;

-	o2hb_bio_wait_init(write_wc, 1);
+	o2hb_bio_wait_init(write_wc);

 	slot = o2nm_this_node();

-	bio = o2hb_setup_one_bio(reg, write_wc, slot, 1);
+	bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1);
 	if (IS_ERR(bio)) {
 		status = PTR_ERR(bio);
 		mlog_errno(status);
 		goto bail;
 	}

+	atomic_inc(&write_wc->wc_num_reqs);
 	submit_bio(WRITE, bio);

-	*write_bio = bio;
 	status = 0;
 bail:
 	return status;
@@ -826,7 +734,6 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
 {
 	int i, ret, highest_node, change = 0;
 	unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
-	struct bio *write_bio;
 	struct o2hb_bio_wait_ctxt write_wc;

 	ret = o2nm_configured_node_map(configured_nodes,
@@ -864,7 +771,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
 	/* And fire off the write. Note that we don't wait on this I/O
 	 * until later. */
-	ret = o2hb_issue_node_write(reg, &write_bio, &write_wc);
+	ret = o2hb_issue_node_write(reg, &write_wc);
 	if (ret < 0) {
 		mlog_errno(ret);
 		return ret;
@@ -882,7 +789,6 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
 	 * people we find in our steady state have seen us.
 	 */
 	o2hb_wait_on_io(reg, &write_wc);
-	bio_put(write_bio);
 	if (write_wc.wc_error) {
 		/* Do not re-arm the write timeout on I/O error - we
 		 * can't be sure that the new block ever made it to
@@ -943,7 +849,6 @@ static int o2hb_thread(void *data)
 {
 	int i, ret;
 	struct o2hb_region *reg = data;
-	struct bio *write_bio;
 	struct o2hb_bio_wait_ctxt write_wc;
 	struct timeval before_hb, after_hb;
 	unsigned int elapsed_msec;
@@ -993,10 +898,9 @@ static int o2hb_thread(void *data)
 	 *
 	 * XXX: Should we skip this on unclean_stop? */
 	o2hb_prepare_block(reg, 0);
-	ret = o2hb_issue_node_write(reg, &write_bio, &write_wc);
+	ret = o2hb_issue_node_write(reg, &write_wc);
 	if (ret == 0) {
 		o2hb_wait_on_io(reg, &write_wc);
-		bio_put(write_bio);
 	} else {
 		mlog_errno(ret);
 	}
...
@@ -556,6 +556,8 @@ static void o2net_register_callbacks(struct sock *sk,
 	sk->sk_data_ready = o2net_data_ready;
 	sk->sk_state_change = o2net_state_change;

+	mutex_init(&sc->sc_send_lock);
+
 	write_unlock_bh(&sk->sk_callback_lock);
 }

@@ -688,6 +690,7 @@ static void o2net_handler_put(struct o2net_msg_handler *nmh)
  * be given to the handler if their payload is longer than the max. */
 int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
 			   o2net_msg_handler_func *func, void *data,
+			   o2net_post_msg_handler_func *post_func,
 			   struct list_head *unreg_list)
 {
 	struct o2net_msg_handler *nmh = NULL;
@@ -722,6 +725,7 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
 	nmh->nh_func = func;
 	nmh->nh_func_data = data;
+	nmh->nh_post_func = post_func;
 	nmh->nh_msg_type = msg_type;
 	nmh->nh_max_len = max_len;
 	nmh->nh_key = key;
@@ -856,10 +860,12 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
 	ssize_t ret;

+	mutex_lock(&sc->sc_send_lock);
 	ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
 					 virt_to_page(kmalloced_virt),
 					 (long)kmalloced_virt & ~PAGE_MASK,
 					 size, MSG_DONTWAIT);
+	mutex_unlock(&sc->sc_send_lock);
 	if (ret != size) {
 		mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
 		     " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
@@ -974,8 +980,10 @@ int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
 	/* finally, convert the message header to network byte-order
 	 * and send */
+	mutex_lock(&sc->sc_send_lock);
 	ret = o2net_send_tcp_msg(sc->sc_sock, vec, veclen,
 				 sizeof(struct o2net_msg) + caller_bytes);
+	mutex_unlock(&sc->sc_send_lock);
 	msglog(msg, "sending returned %d\n", ret);
 	if (ret < 0) {
 		mlog(0, "error returned from o2net_send_tcp_msg=%d\n", ret);
@@ -1049,6 +1057,7 @@ static int o2net_process_message(struct o2net_sock_container *sc,
 	int ret = 0, handler_status;
 	enum o2net_system_error syserr;
 	struct o2net_msg_handler *nmh = NULL;
+	void *ret_data = NULL;

 	msglog(hdr, "processing message\n");

@@ -1101,17 +1110,26 @@ static int o2net_process_message(struct o2net_sock_container *sc,
 	sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
 	handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) +
 					be16_to_cpu(hdr->data_len),
-					nmh->nh_func_data);
+					nmh->nh_func_data, &ret_data);
 	do_gettimeofday(&sc->sc_tv_func_stop);

 out_respond:
 	/* this destroys the hdr, so don't use it after this */
+	mutex_lock(&sc->sc_send_lock);
 	ret = o2net_send_status_magic(sc->sc_sock, hdr, syserr,
 				      handler_status);
+	mutex_unlock(&sc->sc_send_lock);
 	hdr = NULL;
 	mlog(0, "sending handler status %d, syserr %d returned %d\n",
 	     handler_status, syserr, ret);

+	if (nmh) {
+		BUG_ON(ret_data != NULL && nmh->nh_post_func == NULL);
+		if (nmh->nh_post_func)
+			(nmh->nh_post_func)(handler_status, nmh->nh_func_data,
+					    ret_data);
+	}
+
 out:
 	if (nmh)
 		o2net_handler_put(nmh);
@@ -1795,13 +1813,13 @@ static void o2net_listen_data_ready(struct sock *sk, int bytes)
 	ready(sk, bytes);
 }

-static int o2net_open_listening_sock(__be16 port)
+static int o2net_open_listening_sock(__be32 addr, __be16 port)
 {
 	struct socket *sock = NULL;
 	int ret;
 	struct sockaddr_in sin = {
 		.sin_family = PF_INET,
-		.sin_addr = { .s_addr = (__force u32)htonl(INADDR_ANY) },
+		.sin_addr = { .s_addr = (__force u32)addr },
 		.sin_port = (__force u16)port,
 	};

@@ -1824,15 +1842,15 @@ static int o2net_open_listening_sock(__be16 port)
 	sock->sk->sk_reuse = 1;
 	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
 	if (ret < 0) {
-		mlog(ML_ERROR, "unable to bind socket to port %d, ret=%d\n",
-		     ntohs(port), ret);
+		mlog(ML_ERROR, "unable to bind socket at %u.%u.%u.%u:%u, "
+		     "ret=%d\n", NIPQUAD(addr), ntohs(port), ret);
 		goto out;
 	}

 	ret = sock->ops->listen(sock, 64);
 	if (ret < 0) {
-		mlog(ML_ERROR, "unable to listen on port %d, ret=%d\n",
-		     ntohs(port), ret);
+		mlog(ML_ERROR, "unable to listen on %u.%u.%u.%u:%u, ret=%d\n",
+		     NIPQUAD(addr), ntohs(port), ret);
 	}

 out:
@@ -1865,7 +1883,8 @@ int o2net_start_listening(struct o2nm_node *node)
 		return -ENOMEM; /* ? */
 	}

-	ret = o2net_open_listening_sock(node->nd_ipv4_port);
+	ret = o2net_open_listening_sock(node->nd_ipv4_address,
+					node->nd_ipv4_port);
 	if (ret) {
 		destroy_workqueue(o2net_wq);
 		o2net_wq = NULL;
...
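The sc_send_lock additions above all follow one idiom: every writer to the socket (sendpage, the vectored message send, and the status reply) takes the same per-connection mutex, so whole messages are serialized on the TCP stream instead of having their bytes interleaved by concurrent senders. A hedged sketch of that idiom in isolation; the wrapper name is hypothetical, since the patch open-codes the lock at each send site:

/* hypothetical helper, not in the patch */
static int o2net_send_serialized(struct o2net_sock_container *sc,
                                 struct kvec *vec, size_t veclen,
                                 size_t total_bytes)
{
        int ret;

        mutex_lock(&sc->sc_send_lock);
        ret = o2net_send_tcp_msg(sc->sc_sock, vec, veclen, total_bytes);
        mutex_unlock(&sc->sc_send_lock);
        return ret;
}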
@@ -50,7 +50,10 @@ struct o2net_msg
 	__u8  buf[0];
 };

-typedef int (o2net_msg_handler_func)(struct o2net_msg *msg, u32 len, void *data);
+typedef int (o2net_msg_handler_func)(struct o2net_msg *msg, u32 len, void *data,
+				     void **ret_data);
+typedef void (o2net_post_msg_handler_func)(int status, void *data,
+					   void *ret_data);

 #define O2NET_MAX_PAYLOAD_BYTES  (4096 - sizeof(struct o2net_msg))

@@ -99,6 +102,7 @@ int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *vec,
 int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
 			   o2net_msg_handler_func *func, void *data,
+			   o2net_post_msg_handler_func *post_func,
 			   struct list_head *unreg_list);
 void o2net_unregister_handler_list(struct list_head *list);
...
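Callers of the new API look roughly like the sketch below; the message type, handler names, and registration context are hypothetical, and only the signatures and o2net_register_handler() come from the header above. The point of the post handler is that it runs only after o2net has sent the handler's status back, so a handler can defer work that must not happen before the reply is on the wire:

static int my_msg_handler(struct o2net_msg *msg, u32 len, void *data,
                          void **ret_data)
{
        /* a handler may stash state in *ret_data for the post handler;
         * o2net BUG()s if *ret_data is set but no post handler exists */
        *ret_data = NULL;
        return 0;
}

static void my_msg_post_handler(int status, void *data, void *ret_data)
{
        /* runs after the status reply has been sent back */
}

        /* at registration time: */
        ret = o2net_register_handler(MY_MSG_TYPE, key, max_len,
                                     my_msg_handler, my_private,
                                     my_msg_post_handler, &my_unreg_list);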
@@ -38,6 +38,12 @@
  * locking semantics of the file system using the protocol.  It should
  * be somewhere else, I'm sure, but right now it isn't.
  *
+ * New in version 7:
+ * 	- DLM join domain includes the live nodemap
+ *
+ * New in version 6:
+ * 	- DLM lockres remote refcount fixes.
+ *
  * New in version 5:
  * 	- Network timeout checking protocol
  *
@@ -51,7 +57,7 @@
  * 	- full 64 bit i_size in the metadata lock lvbs
  * 	- introduction of "rw" lock and pushing meta/data locking down
  */
-#define O2NET_PROTOCOL_VERSION 5ULL
+#define O2NET_PROTOCOL_VERSION 7ULL
 struct o2net_handshake {
 	__be64	protocol_version;
 	__be64	connector_id;
@@ -149,6 +155,8 @@ struct o2net_sock_container {
 	struct timeval		sc_tv_func_stop;
 	u32			sc_msg_key;
 	u16			sc_msg_type;
+
+	struct mutex		sc_send_lock;
 };

 struct o2net_msg_handler {
@@ -158,6 +166,8 @@ struct o2net_msg_handler {
 	u32			nh_key;
 	o2net_msg_handler_func	*nh_func;
 	o2net_msg_handler_func	*nh_func_data;
+	o2net_post_msg_handler_func
+				*nh_post_func;
 	struct kref		nh_kref;
 	struct list_head	nh_unregister_item;
 };
...
@@ -263,7 +263,8 @@ void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,

-int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data)
+int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
+			  void **ret_data)
 {
 	int ret;
 	unsigned int locklen;
@@ -311,8 +312,8 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data)
 	    past->type != DLM_BAST) {
 		mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu"
 		     "name=%.*s\n", past->type,
-		     dlm_get_lock_cookie_node(cookie),
-		     dlm_get_lock_cookie_seq(cookie),
+		     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
 		     locklen, name);
 		ret = DLM_IVLOCKID;
 		goto leave;
@@ -323,8 +324,8 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data)
 		mlog(0, "got %sast for unknown lockres! "
 		     "cookie=%u:%llu, name=%.*s, namelen=%u\n",
 		     past->type == DLM_AST ? "" : "b",
-		     dlm_get_lock_cookie_node(cookie),
-		     dlm_get_lock_cookie_seq(cookie),
+		     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
 		     locklen, name, locklen);
 		ret = DLM_IVLOCKID;
 		goto leave;
@@ -369,7 +370,8 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data)
 	mlog(0, "got %sast for unknown lock! cookie=%u:%llu, "
 	     "name=%.*s, namelen=%u\n", past->type == DLM_AST ? "" : "b",
-	     dlm_get_lock_cookie_node(cookie), dlm_get_lock_cookie_seq(cookie),
+	     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
+	     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
 	     locklen, name, locklen);

 	ret = DLM_NORMAL;
...
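The substance of the fix above: lock cookies travel big-endian (__be64) with the node number packed in the top byte, so the extraction helpers must be fed a CPU-order value. A self-contained userspace demonstration, with helper bodies mirroring dlmcommon.h; on a little-endian host the first line prints scrambled values, and in the kernel be64_to_cpu() plays the role of be64toh():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static unsigned cookie_node(uint64_t c) { return (unsigned)(c >> 56); }
static unsigned long long cookie_seq(uint64_t c)
{
        return c & 0x00ffffffffffffffULL;
}

int main(void)
{
        uint64_t cpu  = ((uint64_t)7 << 56) | 12345; /* node 7, seq 12345 */
        uint64_t wire = htobe64(cpu);                /* as held in ml.cookie */

        printf("without swap: node=%u seq=%llu\n",
               cookie_node(wire), cookie_seq(wire));
        printf("with swap:    node=%u seq=%llu\n",
               cookie_node(be64toh(wire)), cookie_seq(be64toh(wire)));
        return 0;
}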
@@ -180,6 +180,11 @@ struct dlm_assert_master_priv
 	unsigned ignore_higher:1;
 };

+struct dlm_deref_lockres_priv
+{
+	struct dlm_lock_resource *deref_res;
+	u8 deref_node;
+};
+
 struct dlm_work_item
 {
@@ -191,6 +196,7 @@ struct dlm_work_item
 		struct dlm_request_all_locks_priv ral;
 		struct dlm_mig_lockres_priv ml;
 		struct dlm_assert_master_priv am;
+		struct dlm_deref_lockres_priv dl;
 	} u;
 };

@@ -222,6 +228,9 @@ static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
 #define DLM_LOCK_RES_DIRTY                0x00000008
 #define DLM_LOCK_RES_IN_PROGRESS          0x00000010
 #define DLM_LOCK_RES_MIGRATING            0x00000020
+#define DLM_LOCK_RES_DROPPING_REF         0x00000040
+#define DLM_LOCK_RES_BLOCK_DIRTY          0x00001000
+#define DLM_LOCK_RES_SETREF_INPROG        0x00002000

 /* max milliseconds to wait to sync up a network failure with a node death */
 #define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)

@@ -265,6 +274,8 @@ struct dlm_lock_resource
 	u8 owner;              //node which owns the lock resource, or unknown
 	u16 state;
 	char lvb[DLM_LVB_LEN];
+	unsigned int inflight_locks;
+	unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
 };

 struct dlm_migratable_lock
@@ -367,7 +378,7 @@ enum {
 	DLM_CONVERT_LOCK_MSG,	 /* 504 */
 	DLM_PROXY_AST_MSG,	 /* 505 */
 	DLM_UNLOCK_LOCK_MSG,	 /* 506 */
-	DLM_UNUSED_MSG2,	 /* 507 */
+	DLM_DEREF_LOCKRES_MSG,	 /* 507 */
 	DLM_MIGRATE_REQUEST_MSG, /* 508 */
 	DLM_MIG_LOCKRES_MSG,	 /* 509 */
 	DLM_QUERY_JOIN_MSG,	 /* 510 */
@@ -417,6 +428,9 @@ struct dlm_master_request
 	u8 name[O2NM_MAX_NAME_LEN];
 };

+#define DLM_ASSERT_RESPONSE_REASSERT       0x00000001
+#define DLM_ASSERT_RESPONSE_MASTERY_REF    0x00000002
+
 #define DLM_ASSERT_MASTER_MLE_CLEANUP      0x00000001
 #define DLM_ASSERT_MASTER_REQUERY          0x00000002
 #define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
@@ -430,6 +444,8 @@ struct dlm_assert_master
 	u8 name[O2NM_MAX_NAME_LEN];
 };

+#define DLM_MIGRATE_RESPONSE_MASTERY_REF   0x00000001
+
 struct dlm_migrate_request
 {
 	u8 master;
@@ -609,12 +625,16 @@ struct dlm_begin_reco
 };

+#define BITS_PER_BYTE 8
+#define BITS_TO_BYTES(bits) (((bits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)
+
 struct dlm_query_join_request
 {
 	u8 node_idx;
 	u8 pad1[2];
 	u8 name_len;
 	u8 domain[O2NM_MAX_NAME_LEN];
+	u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];
 };

 struct dlm_assert_joined
@@ -648,6 +668,16 @@ struct dlm_finalize_reco
 	__be32 pad2;
 };

+struct dlm_deref_lockres
+{
+	u32 pad1;
+	u16 pad2;
+	u8 node_idx;
+	u8 namelen;
+
+	u8 name[O2NM_MAX_NAME_LEN];
+};
+
 static inline enum dlm_status
 __dlm_lockres_state_to_status(struct dlm_lock_resource *res)
 {
@@ -688,16 +718,20 @@ void dlm_lock_put(struct dlm_lock *lock);
 void dlm_lock_attach_lockres(struct dlm_lock *lock,
 			     struct dlm_lock_resource *res);
-int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data);
-int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data);
-int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data);
+int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
+			    void **ret_data);
+int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
+			     void **ret_data);
+int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
+			  void **ret_data);

 void dlm_revert_pending_convert(struct dlm_lock_resource *res,
 				struct dlm_lock *lock);
 void dlm_revert_pending_lock(struct dlm_lock_resource *res,
 			     struct dlm_lock *lock);
-int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data);
+int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
+			    void **ret_data);
 void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
 			       struct dlm_lock *lock);
 void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
@@ -721,8 +755,6 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 			      struct dlm_lock_resource *res);
 void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 			    struct dlm_lock_resource *res);
-void dlm_purge_lockres(struct dlm_ctxt *dlm,
-		       struct dlm_lock_resource *lockres);
 static inline void dlm_lockres_get(struct dlm_lock_resource *res)
 {
 	/* This is called on every lookup, so it might be worth
@@ -733,6 +765,10 @@ void dlm_lockres_put(struct dlm_lock_resource *res);
 void __dlm_unhash_lockres(struct dlm_lock_resource *res);
 void __dlm_insert_lockres(struct dlm_ctxt *dlm,
 			  struct dlm_lock_resource *res);
+struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
+						     const char *name,
+						     unsigned int len,
+						     unsigned int hash);
 struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
 						const char *name,
 						unsigned int len,
@@ -753,6 +789,47 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
 					  const char *name,
 					  unsigned int namelen);
+#define dlm_lockres_set_refmap_bit(bit,res)  \
+	__dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__)
+#define dlm_lockres_clear_refmap_bit(bit,res)  \
+	__dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__)
+
+static inline void __dlm_lockres_set_refmap_bit(int bit,
+						struct dlm_lock_resource *res,
+						const char *file,
+						int line)
+{
+	//printk("%s:%d:%.*s: setting bit %d\n", file, line,
+	//     res->lockname.len, res->lockname.name, bit);
+	set_bit(bit, res->refmap);
+}
+
+static inline void __dlm_lockres_clear_refmap_bit(int bit,
+						  struct dlm_lock_resource *res,
+						  const char *file,
+						  int line)
+{
+	//printk("%s:%d:%.*s: clearing bit %d\n", file, line,
+	//     res->lockname.len, res->lockname.name, bit);
+	clear_bit(bit, res->refmap);
+}
+
+void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+				     struct dlm_lock_resource *res,
+				     const char *file,
+				     int line);
+void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+				     struct dlm_lock_resource *res,
+				     int new_lockres,
+				     const char *file,
+				     int line);
+#define dlm_lockres_drop_inflight_ref(d,r)  \
+	__dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__)
+#define dlm_lockres_grab_inflight_ref(d,r)  \
+	__dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__)
+#define dlm_lockres_grab_inflight_ref_new(d,r)  \
+	__dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__)
+
 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void dlm_do_local_ast(struct dlm_ctxt *dlm,
@@ -801,10 +878,7 @@ int dlm_heartbeat_init(struct dlm_ctxt *dlm);
 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);

-int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
-int dlm_migrate_lockres(struct dlm_ctxt *dlm,
-			struct dlm_lock_resource *res,
-			u8 target);
+int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
 int dlm_finish_migration(struct dlm_ctxt *dlm,
 			 struct dlm_lock_resource *res,
 			 u8 old_master);
@@ -812,15 +886,27 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
 			     struct dlm_lock_resource *res);
 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);

-int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data);
-int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data);
-int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data);
-int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data);
-int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data);
-int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data);
-int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data);
-int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data);
-int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data);
+int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
+			       void **ret_data);
+int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
+			      void **ret_data);
+void dlm_assert_master_post_handler(int status, void *data, void *ret_data);
+int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
+			      void **ret_data);
+int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
+				void **ret_data);
+int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
+			    void **ret_data);
+int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
+			       void **ret_data);
+int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
+				  void **ret_data);
+int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
+			       void **ret_data);
+int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
+			   void **ret_data);
+int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
+			      void **ret_data);
 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
 			  u8 nodenum, u8 *real_master);

@@ -856,10 +942,12 @@ static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
 int dlm_init_mle_cache(void);
 void dlm_destroy_mle_cache(void);
 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
+int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
+			 struct dlm_lock_resource *res);
 void dlm_clean_master_list(struct dlm_ctxt *dlm,
 			   u8 dead_node);
 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
 int __dlm_lockres_unused(struct dlm_lock_resource *res);

 static inline const char * dlm_lock_mode_name(int mode)
...
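A quick sanity check of the BITS_TO_BYTES() arithmetic behind the new node_map field; O2NM_MAX_NODES is 255 in this tree, so the join request grows by 32 bytes:

#include <stdio.h>

#define BITS_PER_BYTE 8
#define BITS_TO_BYTES(bits) (((bits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)

int main(void)
{
        printf("node_map for %d nodes = %d bytes\n",
               255, BITS_TO_BYTES(255));   /* prints 32 */
        return 0;
}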
@@ -286,8 +286,8 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
 		__dlm_print_one_lock_resource(res);
 		mlog(ML_ERROR, "converting a remote lock that is already "
 		     "converting! (cookie=%u:%llu, conv=%d)\n",
-		     dlm_get_lock_cookie_node(lock->ml.cookie),
-		     dlm_get_lock_cookie_seq(lock->ml.cookie),
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
 		     lock->ml.convert_type);
 		status = DLM_DENIED;
 		goto bail;
@@ -418,7 +418,8 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
 * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS,
 *          status from __dlmconvert_master
 */
-int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
+int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
+			     void **ret_data)
 {
 	struct dlm_ctxt *dlm = data;
 	struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
@@ -428,7 +429,7 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
 	struct dlm_lockstatus *lksb;
 	enum dlm_status status = DLM_NORMAL;
 	u32 flags;
-	int call_ast = 0, kick_thread = 0, ast_reserved = 0;
+	int call_ast = 0, kick_thread = 0, ast_reserved = 0, wake = 0;

 	if (!dlm_grab(dlm)) {
 		dlm_error(DLM_REJECTED);
@@ -479,25 +480,14 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
 		}
 		lock = NULL;
 	}
-	if (!lock) {
-		__dlm_print_one_lock_resource(res);
-		list_for_each(iter, &res->granted) {
-			lock = list_entry(iter, struct dlm_lock, list);
-			if (lock->ml.node == cnv->node_idx) {
-				mlog(ML_ERROR, "There is something here "
-				     "for node %u, lock->ml.cookie=%llu, "
-				     "cnv->cookie=%llu\n", cnv->node_idx,
-				     (unsigned long long)lock->ml.cookie,
-				     (unsigned long long)cnv->cookie);
-				break;
-			}
-		}
-		lock = NULL;
-	}
 	spin_unlock(&res->spinlock);
 	if (!lock) {
 		status = DLM_IVLOCKID;
-		dlm_error(status);
+		mlog(ML_ERROR, "did not find lock to convert on grant queue! "
+			       "cookie=%u:%llu\n",
+		     dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie)));
+		__dlm_print_one_lock_resource(res);
 		goto leave;
 	}

@@ -524,8 +514,11 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
 					cnv->requested_type,
 					&call_ast, &kick_thread);
 		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+		wake = 1;
 	}
 	spin_unlock(&res->spinlock);
+	if (wake)
+		wake_up(&res->wq);

 	if (status != DLM_NORMAL) {
 		if (status != DLM_NOTQUEUED)
@@ -534,12 +527,7 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
 	}

 leave:
-	if (!lock)
-		mlog(ML_ERROR, "did not find lock to convert on grant queue! "
-		     "cookie=%u:%llu\n",
-		     dlm_get_lock_cookie_node(cnv->cookie),
-		     dlm_get_lock_cookie_seq(cnv->cookie));
-	else
+	if (lock)
 		dlm_lock_put(lock);

 	/* either queue the ast or release it, if reserved */
...
@@ -53,6 +53,23 @@ void dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 	spin_unlock(&res->spinlock);
 }

+static void dlm_print_lockres_refmap(struct dlm_lock_resource *res)
+{
+	int bit;
+	assert_spin_locked(&res->spinlock);
+
+	mlog(ML_NOTICE, "  refmap nodes: [ ");
+	bit = 0;
+	while (1) {
+		bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
+		if (bit >= O2NM_MAX_NODES)
+			break;
+		printk("%u ", bit);
+		bit++;
+	}
+	printk("], inflight=%u\n", res->inflight_locks);
+}
+
 void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 {
 	struct list_head *iter2;
@@ -65,6 +82,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 	     res->owner, res->state);
 	mlog(ML_NOTICE, "  last used: %lu, on purge list: %s\n",
 	     res->last_used, list_empty(&res->purge) ? "no" : "yes");
+	dlm_print_lockres_refmap(res);
 	mlog(ML_NOTICE, "  granted queue: \n");
 	list_for_each(iter2, &res->granted) {
 		lock = list_entry(iter2, struct dlm_lock, list);
@@ -72,8 +90,8 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, "
 		     "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n",
 		     lock->ml.type, lock->ml.convert_type, lock->ml.node,
-		     dlm_get_lock_cookie_node(lock->ml.cookie),
-		     dlm_get_lock_cookie_seq(lock->ml.cookie),
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
 		     list_empty(&lock->ast_list) ? 'y' : 'n',
 		     lock->ast_pending ? 'y' : 'n',
 		     list_empty(&lock->bast_list) ? 'y' : 'n',
@@ -87,8 +105,8 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, "
 		     "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n",
 		     lock->ml.type, lock->ml.convert_type, lock->ml.node,
-		     dlm_get_lock_cookie_node(lock->ml.cookie),
-		     dlm_get_lock_cookie_seq(lock->ml.cookie),
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
 		     list_empty(&lock->ast_list) ? 'y' : 'n',
 		     lock->ast_pending ? 'y' : 'n',
 		     list_empty(&lock->bast_list) ? 'y' : 'n',
@@ -102,8 +120,8 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, "
 		     "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n",
 		     lock->ml.type, lock->ml.convert_type, lock->ml.node,
-		     dlm_get_lock_cookie_node(lock->ml.cookie),
-		     dlm_get_lock_cookie_seq(lock->ml.cookie),
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
 		     list_empty(&lock->ast_list) ? 'y' : 'n',
 		     lock->ast_pending ? 'y' : 'n',
 		     list_empty(&lock->bast_list) ? 'y' : 'n',
...
This diff is collapsed.
@@ -163,6 +163,10 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
 			kick_thread = 1;
 		}
 	}
+	/* reduce the inflight count, this may result in the lockres
+	 * being purged below during calc_usage */
+	if (lock->ml.node == dlm->node_num)
+		dlm_lockres_drop_inflight_ref(dlm, res);

 	spin_unlock(&res->spinlock);
 	wake_up(&res->wq);
@@ -437,7 +441,8 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
 * held on exit: none
 * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED
 */
-int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data)
+int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
+			    void **ret_data)
 {
 	struct dlm_ctxt *dlm = data;
 	struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
...
This diff is collapsed.
This diff is collapsed.
@@ -54,9 +54,6 @@
 #include "cluster/masklog.h"

 static int dlm_thread(void *data);
-static void dlm_purge_lockres_now(struct dlm_ctxt *dlm,
-				  struct dlm_lock_resource *lockres);
-
 static void dlm_flush_asts(struct dlm_ctxt *dlm);

 #define dlm_lock_is_remote(dlm, lock)     ((lock)->ml.node != (dlm)->node_num)
@@ -82,14 +79,33 @@ void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
 	current->state = TASK_RUNNING;
 }

-int __dlm_lockres_unused(struct dlm_lock_resource *res)
+int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
 {
 	if (list_empty(&res->granted) &&
 	    list_empty(&res->converting) &&
-	    list_empty(&res->blocked) &&
-	    list_empty(&res->dirty))
-		return 1;
+	    list_empty(&res->blocked))
+		return 0;
+	return 1;
+}
+
+/* "unused": the lockres has no locks, is not on the dirty list,
+ * has no inflight locks (in the gap between mastery and acquiring
+ * the first lock), and has no bits in its refmap.
+ * truly ready to be freed. */
+int __dlm_lockres_unused(struct dlm_lock_resource *res)
+{
+	if (!__dlm_lockres_has_locks(res) &&
+	    (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
+		/* try not to scan the bitmap unless the first two
+		 * conditions are already true */
+		int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+		if (bit >= O2NM_MAX_NODES) {
+			/* since the bit for dlm->node_num is not
+			 * set, inflight_locks better be zero */
+			BUG_ON(res->inflight_locks != 0);
+			return 1;
+		}
+	}
 	return 0;
 }
@@ -106,46 +122,21 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 	assert_spin_locked(&res->spinlock);

 	if (__dlm_lockres_unused(res)){
-		/* For now, just keep any resource we master */
-		if (res->owner == dlm->node_num)
-		{
-			if (!list_empty(&res->purge)) {
-				mlog(0, "we master %s:%.*s, but it is on "
-				     "the purge list.  Removing\n",
-				     dlm->name, res->lockname.len,
-				     res->lockname.name);
-				list_del_init(&res->purge);
-				dlm->purge_count--;
-			}
-			return;
-		}
-
 		if (list_empty(&res->purge)) {
-			mlog(0, "putting lockres %.*s from purge list\n",
-			     res->lockname.len, res->lockname.name);
+			mlog(0, "putting lockres %.*s:%p onto purge list\n",
+			     res->lockname.len, res->lockname.name, res);

 			res->last_used = jiffies;
+			dlm_lockres_get(res);
 			list_add_tail(&res->purge, &dlm->purge_list);
 			dlm->purge_count++;
-
-			/* if this node is not the owner, there is
-			 * no way to keep track of who the owner could be.
-			 * unhash it to avoid serious problems. */
-			if (res->owner != dlm->node_num) {
-				mlog(0, "%s:%.*s: doing immediate "
-				     "purge of lockres owned by %u\n",
-				     dlm->name, res->lockname.len,
-				     res->lockname.name, res->owner);
-				dlm_purge_lockres_now(dlm, res);
-			}
 		}
 	} else if (!list_empty(&res->purge)) {
-		mlog(0, "removing lockres %.*s from purge list, "
-		     "owner=%u\n", res->lockname.len, res->lockname.name,
-		     res->owner);
+		mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n",
+		     res->lockname.len, res->lockname.name, res, res->owner);

 		list_del_init(&res->purge);
+		dlm_lockres_put(res);
 		dlm->purge_count--;
 	}
 }
@@ -163,68 +154,65 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 	spin_unlock(&dlm->spinlock);
 }
-/* TODO: Eventual API: Called with the dlm spinlock held, may drop it
- * to do migration, but will re-acquire before exit. */
-void dlm_purge_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *lockres)
+static int dlm_purge_lockres(struct dlm_ctxt *dlm,
+			     struct dlm_lock_resource *res)
 {
 	int master;
-	int ret;
-
-	spin_lock(&lockres->spinlock);
-	master = lockres->owner == dlm->node_num;
-	spin_unlock(&lockres->spinlock);
-
-	mlog(0, "purging lockres %.*s, master = %d\n", lockres->lockname.len,
-	     lockres->lockname.name, master);
-
-	/* Non master is the easy case -- no migration required, just
-	 * quit. */
-	if (!master)
-		goto finish;
-
-	/* Wheee! Migrate lockres here! */
-	spin_unlock(&dlm->spinlock);
-again:
-
-	ret = dlm_migrate_lockres(dlm, lockres, O2NM_MAX_NODES);
-	if (ret == -ENOTEMPTY) {
-		mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
-		     lockres->lockname.len, lockres->lockname.name);
-		BUG();
-	} else if (ret < 0) {
-		mlog(ML_NOTICE, "lockres %.*s: migrate failed, retrying\n",
-		     lockres->lockname.len, lockres->lockname.name);
-		msleep(100);
-		goto again;
-	}
-
-	spin_lock(&dlm->spinlock);
-
-finish:
-	if (!list_empty(&lockres->purge)) {
-		list_del_init(&lockres->purge);
-		dlm->purge_count--;
-	}
-	__dlm_unhash_lockres(lockres);
-}
-
-/* make an unused lockres go away immediately.
- * as soon as the dlm spinlock is dropped, this lockres
- * will not be found. kfree still happens on last put. */
-static void dlm_purge_lockres_now(struct dlm_ctxt *dlm,
-				  struct dlm_lock_resource *lockres)
-{
-	assert_spin_locked(&dlm->spinlock);
-	assert_spin_locked(&lockres->spinlock);
-
-	BUG_ON(!__dlm_lockres_unused(lockres));
-
-	if (!list_empty(&lockres->purge)) {
-		list_del_init(&lockres->purge);
-		dlm->purge_count--;
+	int ret = 0;
+
+	spin_lock(&res->spinlock);
+	if (!__dlm_lockres_unused(res)) {
+		spin_unlock(&res->spinlock);
+		mlog(0, "%s:%.*s: tried to purge but not unused\n",
+		     dlm->name, res->lockname.len, res->lockname.name);
+		return -ENOTEMPTY;
+	}
+	master = (res->owner == dlm->node_num);
+	if (!master)
+		res->state |= DLM_LOCK_RES_DROPPING_REF;
+	spin_unlock(&res->spinlock);
+
+	mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
+	     res->lockname.name, master);
+
+	if (!master) {
+		spin_lock(&res->spinlock);
+		/* This ensures that clear refmap is sent after the set */
+		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
+		spin_unlock(&res->spinlock);
+		/* drop spinlock to do messaging, retake below */
+		spin_unlock(&dlm->spinlock);
+		/* clear our bit from the master's refmap, ignore errors */
+		ret = dlm_drop_lockres_ref(dlm, res);
+		if (ret < 0) {
+			mlog_errno(ret);
+			if (!dlm_is_host_down(ret))
+				BUG();
+		}
+		mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
+		     dlm->name, res->lockname.len, res->lockname.name, ret);
+		spin_lock(&dlm->spinlock);
+	}
+
+	if (!list_empty(&res->purge)) {
+		mlog(0, "removing lockres %.*s:%p from purgelist, "
+		     "master = %d\n", res->lockname.len, res->lockname.name,
+		     res, master);
+		list_del_init(&res->purge);
+		dlm_lockres_put(res);
+		dlm->purge_count--;
 	}
-	__dlm_unhash_lockres(lockres);
+	__dlm_unhash_lockres(res);
+
+	/* lockres is not in the hash now.  drop the flag and wake up
+	 * any processes waiting in dlm_get_lock_resource. */
+	if (!master) {
+		spin_lock(&res->spinlock);
+		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+		spin_unlock(&res->spinlock);
+		wake_up(&res->wq);
+	}
+	return 0;
 }
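For context, the dlm_drop_lockres_ref() call above essentially packs a struct dlm_deref_lockres and sends DLM_DEREF_LOCKRES_MSG to the master node, asking it to clear this node's bit in the lockres refmap. A hedged sketch of that flow, reconstructed from the struct and message number added in dlmcommon.h earlier rather than copied from the patch:

/* sketch only: the real implementation lives elsewhere in this series */
static int send_deref_sketch(struct dlm_ctxt *dlm,
                             struct dlm_lock_resource *res)
{
        struct dlm_deref_lockres deref;
        int ret, status;

        memset(&deref, 0, sizeof(deref));
        deref.node_idx = dlm->node_num;
        deref.namelen = res->lockname.len;
        memcpy(deref.name, res->lockname.name, res->lockname.len);

        /* ask the master to clear our bit in res->refmap */
        ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
                                 &deref, sizeof(deref), res->owner, &status);
        return ret < 0 ? ret : status;
}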
 static void dlm_run_purge_list(struct dlm_ctxt *dlm,
@@ -268,13 +256,17 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
 			break;
 		}

+		mlog(0, "removing lockres %.*s:%p from purgelist\n",
+		     lockres->lockname.len, lockres->lockname.name, lockres);
 		list_del_init(&lockres->purge);
+		dlm_lockres_put(lockres);
 		dlm->purge_count--;

 		/* This may drop and reacquire the dlm spinlock if it
 		 * has to do migration. */
 		mlog(0, "calling dlm_purge_lockres!\n");
-		dlm_purge_lockres(dlm, lockres);
+		if (dlm_purge_lockres(dlm, lockres))
+			BUG();
 		mlog(0, "DONE calling dlm_purge_lockres!\n");

 		/* Avoid adding any scheduling latencies */
@@ -467,12 +459,17 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 	assert_spin_locked(&res->spinlock);
 
 	/* don't shuffle secondary queues */
-	if ((res->owner == dlm->node_num) &&
-	    !(res->state & DLM_LOCK_RES_DIRTY)) {
-		/* ref for dirty_list */
-		dlm_lockres_get(res);
-		list_add_tail(&res->dirty, &dlm->dirty_list);
-		res->state |= DLM_LOCK_RES_DIRTY;
+	if ((res->owner == dlm->node_num)) {
+		if (res->state & (DLM_LOCK_RES_MIGRATING |
+				  DLM_LOCK_RES_BLOCK_DIRTY))
+			return;
+
+		if (list_empty(&res->dirty)) {
+			/* ref for dirty_list */
+			dlm_lockres_get(res);
+			list_add_tail(&res->dirty, &dlm->dirty_list);
+			res->state |= DLM_LOCK_RES_DIRTY;
+		}
 	}
 }
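Two things change in __dlm_dirty_lockres() above: lockreses that are migrating or blocked from dirtying are skipped outright, and list membership (list_empty(&res->dirty)) rather than the DIRTY flag now gates the reference taken for the dirty list. That decouples the flag's lifetime from the list's, so the flag can be cleared after the shuffle without unbalancing refcounts. A standalone sketch of the "one reference per list membership" rule, with hypothetical userspace names:

#include <assert.h>
#include <stdbool.h>

#define RES_DIRTY 0x1

struct res {
	int refs;	/* stands in for the lockres refcount */
	bool on_dirty;	/* stands in for !list_empty(&res->dirty) */
	unsigned state;
};

static void dirty_res(struct res *r)	/* like __dlm_dirty_lockres() */
{
	if (!r->on_dirty) {	/* membership, not the DIRTY flag, gates the ref */
		r->refs++;	/* ref for dirty_list */
		r->on_dirty = true;
		r->state |= RES_DIRTY;
	}
}

static void undirty_res(struct res *r)	/* like the dlm_thread dequeue */
{
	assert(r->on_dirty);
	r->on_dirty = false;
	r->refs--;	/* drop the dirty_list ref */
	/* RES_DIRTY itself may be cleared later, after the shuffle */
}

int main(void)
{
	struct res r = { .refs = 1 };	/* one base reference */
	dirty_res(&r);
	dirty_res(&r);	/* second call is a no-op: no double ref */
	undirty_res(&r);
	assert(r.refs == 1);	/* balanced regardless of flag state */
	return 0;
}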
@@ -651,7 +648,7 @@ static int dlm_thread(void *data)
 			dlm_lockres_get(res);
 
 			spin_lock(&res->spinlock);
-			res->state &= ~DLM_LOCK_RES_DIRTY;
+			/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
 			list_del_init(&res->dirty);
 			spin_unlock(&res->spinlock);
 			spin_unlock(&dlm->spinlock);
@@ -675,10 +672,11 @@ static int dlm_thread(void *data)
 			/* it is now ok to move lockreses in these states
 			 * to the dirty list, assuming that they will only be
 			 * dirty for a short while. */
+			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
 			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
-					  DLM_LOCK_RES_MIGRATING |
 					  DLM_LOCK_RES_RECOVERING)) {
 				/* move it to the tail and keep going */
+				res->state &= ~DLM_LOCK_RES_DIRTY;
 				spin_unlock(&res->spinlock);
 				mlog(0, "delaying list shuffling for in-"
 				     "progress lockres %.*s, state=%d\n",
@@ -699,6 +697,7 @@ static int dlm_thread(void *data)
 			/* called while holding lockres lock */
 			dlm_shuffle_lists(dlm, res);
+			res->state &= ~DLM_LOCK_RES_DIRTY;
 			spin_unlock(&res->spinlock);
 
 			dlm_lockres_calc_usage(dlm, res);
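Keeping DLM_LOCK_RES_DIRTY set until after dlm_shuffle_lists() (and clearing it explicitly on the delayed path in the previous hunk) appears to close the migrate-vs-dirty race named in the merge log: migration waits for the flag to drop before freezing the lockres, so clearing it at dequeue time left a window where migration could begin while the thread was still shuffling queues. Reusing the struct lockres and wait_on_flags() helpers from the earlier sketch, the handshake looks roughly like this (hypothetical names, not the kernel API):

#define RES_DIRTY 0x04	/* set while a queue shuffle is pending or in flight */

/* dlm_thread side: the flag drops only once the shuffle is complete */
static void thread_shuffle(struct lockres *res)
{
	pthread_mutex_lock(&res->lock);
	/* ...shuffle the granted/converting/blocked queues here... */
	res->state &= ~RES_DIRTY;	/* only now is migration safe */
	pthread_cond_broadcast(&res->wq);
	pthread_mutex_unlock(&res->lock);
}

/* migration side: must not proceed while a shuffle is in flight */
static void migrate_begin(struct lockres *res)
{
	pthread_mutex_lock(&res->lock);
	wait_on_flags(res, RES_DIRTY);
	/* ...mark the lockres MIGRATING and proceed... */
	pthread_mutex_unlock(&res->lock);
}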
@@ -709,11 +708,8 @@ static int dlm_thread(void *data)
 			/* if the lock was in-progress, stick
 			 * it on the back of the list */
 			if (delay) {
-				/* ref for dirty_list */
-				dlm_lockres_get(res);
 				spin_lock(&res->spinlock);
-				list_add_tail(&res->dirty, &dlm->dirty_list);
-				res->state |= DLM_LOCK_RES_DIRTY;
+				__dlm_dirty_lockres(dlm, res);
 				spin_unlock(&res->spinlock);
 			}
 			dlm_lockres_put(res);
...
@@ -147,6 +147,10 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
 		goto leave;
 	}
 
+	if (res->state & DLM_LOCK_RES_MIGRATING) {
+		status = DLM_MIGRATING;
+		goto leave;
+	}
+
 	/* see above for what the spec says about
 	 * LKM_CANCEL and the lock queue state */
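The DLM_MIGRATING early-out above turns an unlock attempt against a migrating lockres into a transient status instead of letting it race the migration. Per the merge log ("Make dlmunlock() wait for migration to complete"), callers treat it like the other transient statuses and retry after a short back-off. A hedged, userspace-only sketch of that contract (the real loop lives in dlmunlock(); these names are illustrative):

#include <stdio.h>
#include <unistd.h>

enum status { ST_NORMAL, ST_MIGRATING, ST_RECOVERING, ST_FORWARD };

static enum status unlock_common(int *attempts)
{
	/* pretend migration finishes after two attempts */
	return (*attempts)++ < 2 ? ST_MIGRATING : ST_NORMAL;
}

static enum status do_unlock(void)
{
	int attempts = 0;
	enum status st;

	for (;;) {
		st = unlock_common(&attempts);
		if (st != ST_MIGRATING && st != ST_RECOVERING &&
		    st != ST_FORWARD)
			return st;
		usleep(50 * 1000);	/* give migration a chance to finish */
	}
}

int main(void)
{
	printf("unlock returned %d\n", do_unlock());
	return 0;
}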
...@@ -244,8 +248,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, ...@@ -244,8 +248,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
/* this should always be coupled with list removal */ /* this should always be coupled with list removal */
BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK)); BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK));
mlog(0, "lock %u:%llu should be gone now! refs=%d\n", mlog(0, "lock %u:%llu should be gone now! refs=%d\n",
dlm_get_lock_cookie_node(lock->ml.cookie), dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(lock->ml.cookie), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
atomic_read(&lock->lock_refs.refcount)-1); atomic_read(&lock->lock_refs.refcount)-1);
dlm_lock_put(lock); dlm_lock_put(lock);
} }
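The cookie fix above is a byte-order bug: lock->ml.cookie is kept in big-endian (wire) form, while dlm_get_lock_cookie_node()/dlm_get_lock_cookie_seq() shift and mask a host-order u64, so on little-endian machines the printed node and sequence were scrambled until the value passes through be64_to_cpu(). A self-contained illustration, assuming the layout the helpers imply (node in the top 8 bits, sequence in the low 56):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* portable equivalent of the kernel's be64_to_cpu() */
static uint64_t be64_to_host(uint64_t v)
{
	uint8_t *b = (uint8_t *)&v;
	return ((uint64_t)b[0] << 56) | ((uint64_t)b[1] << 48) |
	       ((uint64_t)b[2] << 40) | ((uint64_t)b[3] << 32) |
	       ((uint64_t)b[4] << 24) | ((uint64_t)b[5] << 16) |
	       ((uint64_t)b[6] << 8)  |  (uint64_t)b[7];
}

static unsigned cookie_node(uint64_t c) { return (unsigned)(c >> 56); }
static unsigned long long cookie_seq(uint64_t c)
{
	return c & 0x00ffffffffffffffULL;
}

int main(void)
{
	uint64_t cookie = ((uint64_t)7 << 56) | 42;	/* node 7, seq 42 */
	uint8_t wire[8];
	uint64_t raw, fixed;
	int i;

	/* what the struct carries on the wire: big-endian bytes */
	for (i = 0; i < 8; i++)
		wire[i] = (uint8_t)(cookie >> (56 - 8 * i));
	memcpy(&raw, wire, 8);		/* what ml.cookie holds in memory */
	fixed = be64_to_host(raw);

	/* on little-endian hosts the raw decode prints node=42, not 7 */
	printf("raw:   node=%u seq=%llu\n", cookie_node(raw), cookie_seq(raw));
	printf("fixed: node=%u seq=%llu\n", cookie_node(fixed), cookie_seq(fixed));
	return 0;
}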
@@ -379,7 +383,8 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
  * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID,
  *          return value from dlmunlock_master
  */
-int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data)
+int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
+			    void **ret_data)
 {
 	struct dlm_ctxt *dlm = data;
 	struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
@@ -502,8 +507,8 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data)
 	if (!found)
 		mlog(ML_ERROR, "failed to find lock to unlock! "
 		     "cookie=%u:%llu\n",
-		     dlm_get_lock_cookie_node(unlock->cookie),
-		     dlm_get_lock_cookie_seq(unlock->cookie));
+		     dlm_get_lock_cookie_node(be64_to_cpu(unlock->cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(unlock->cookie)));
 	else
 		dlm_lock_put(lock);
...
@@ -887,7 +887,7 @@ static inline int ocfs2_translate_response(int response)
 static int ocfs2_handle_response_message(struct o2net_msg *msg,
 					 u32 len,
-					 void *data)
+					 void *data, void **ret_data)
 {
 	unsigned int response_id, node_num;
 	int response_status;
@@ -943,7 +943,7 @@ static int ocfs2_handle_response_message(struct o2net_msg *msg,
 static int ocfs2_handle_vote_message(struct o2net_msg *msg,
 				     u32 len,
-				     void *data)
+				     void *data, void **ret_data)
 {
 	int status;
 	struct ocfs2_super *osb = data;
@@ -1007,7 +1007,7 @@ int ocfs2_register_net_handlers(struct ocfs2_super *osb)
 					osb->net_key,
 					sizeof(struct ocfs2_response_msg),
 					ocfs2_handle_response_message,
-					osb, &osb->osb_net_handlers);
+					osb, NULL, &osb->osb_net_handlers);
 	if (status) {
 		mlog_errno(status);
 		goto bail;
@@ -1017,7 +1017,7 @@ int ocfs2_register_net_handlers(struct ocfs2_super *osb)
 					osb->net_key,
 					sizeof(struct ocfs2_vote_msg),
 					ocfs2_handle_vote_message,
-					osb, &osb->osb_net_handlers);
+					osb, NULL, &osb->osb_net_handlers);
 	if (status) {
 		mlog_errno(status);
 		goto bail;
...
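The signature churn in the last few hunks comes from the new o2net post-handler facility: every message handler gains a void **ret_data out-parameter, registration takes one extra argument (the post handler, NULL where unused as in the vote code above), and the dispatcher invokes the post handler after the handler's status has been sent back. A userspace model of that flow, all names illustrative rather than the real o2net API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef int  (*msg_handler_fn)(void *msg, unsigned len, void *data,
			       void **ret_data);
typedef void (*post_handler_fn)(int status, void *data, void *ret_data);

static int assert_master_handler(void *msg, unsigned len, void *data,
				 void **ret_data)
{
	/* defer the heavyweight work until after the response is sent */
	*ret_data = strdup("deferred work item");
	return 0;
}

static void assert_master_post(int status, void *data, void *ret_data)
{
	printf("post handler: status=%d, item=%s\n", status,
	       (char *)ret_data);
	free(ret_data);
}

/* stand-in for the o2net dispatch path */
static void dispatch(msg_handler_fn h, post_handler_fn post, void *data)
{
	void *ret_data = NULL;
	int status = h(NULL, 0, data, &ret_data);

	/* ...the status would be sent back over the wire here... */
	if (post)
		post(status, data, ret_data);
}

int main(void)
{
	dispatch(assert_master_handler, assert_master_post, NULL);
	return 0;
}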