Commit e1891cff authored by Corey Minyard

ipmi: Make the smi watcher be disabled immediately when not needed

The code to tell the lower layer to enable or disable watching for
certain things was lazy in disabling: it waited until a timer tick
to see whether a disable was necessary.  Not a really big deal, but
it could be improved.

Modify the code to enable and disable watching immediately and no
longer do it from the background timer.
Signed-off-by: Corey Minyard <cminyard@mvista.com>
Tested-by: Kamlakant Patel <kamlakant.patel@cavium.com>
parent c65ea996
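
The core of the change is a pair of reference-counted add/remove helpers
that push the new watch mask down to the lower layer the moment it
changes.  A minimal userspace sketch of that pattern follows (hypothetical
names, a printf() stand-in for set_need_watch(), locking omitted; this is
an illustration of the approach, not the kernel code itself):

#include <stdio.h>

#define WATCH_MESSAGES (1 << 0)
#define WATCH_WATCHDOG (1 << 1)
#define WATCH_COMMANDS (1 << 2)

struct watcher {
	unsigned int response_waiters;
	unsigned int watchdog_waiters;
	unsigned int command_waiters;
	unsigned int last_mask;
};

/* Stand-in for intf->handlers->set_need_watch(). */
static void tell_lower_layer(unsigned int mask)
{
	printf("lower layer now watching mask 0x%x\n", mask);
}

static void watch_add(struct watcher *w, unsigned int flags)
{
	if (flags & WATCH_MESSAGES)
		w->response_waiters++;
	if (flags & WATCH_WATCHDOG)
		w->watchdog_waiters++;
	if (flags & WATCH_COMMANDS)
		w->command_waiters++;

	/* Enable immediately if any requested bit is not yet set. */
	if ((w->last_mask & flags) != flags) {
		w->last_mask |= flags;
		tell_lower_layer(w->last_mask);
	}
}

static void watch_remove(struct watcher *w, unsigned int flags)
{
	unsigned int mask = 0;

	if (flags & WATCH_MESSAGES)
		w->response_waiters--;
	if (flags & WATCH_WATCHDOG)
		w->watchdog_waiters--;
	if (flags & WATCH_COMMANDS)
		w->command_waiters--;

	/* Recompute the mask from the counters and disable at once. */
	if (w->response_waiters)
		mask |= WATCH_MESSAGES;
	if (w->watchdog_waiters)
		mask |= WATCH_WATCHDOG;
	if (w->command_waiters)
		mask |= WATCH_COMMANDS;

	if (w->last_mask != mask) {
		w->last_mask = mask;
		tell_lower_layer(w->last_mask);
	}
}

int main(void)
{
	struct watcher w = { 0 };

	watch_add(&w, WATCH_MESSAGES);    /* prints mask 0x1 */
	watch_add(&w, WATCH_WATCHDOG);    /* prints mask 0x3 */
	watch_remove(&w, WATCH_MESSAGES); /* prints mask 0x2 immediately */
	watch_remove(&w, WATCH_WATCHDOG); /* prints mask 0x0 */
	return 0;
}

The old code only ever OR'd bits into the mask (see the removed
smi_tell_to_watch() below); clearing a bit had to wait for the next
ipmi_timeout() pass.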
@@ -534,15 +534,20 @@ struct ipmi_smi {
 	atomic_t event_waiters;
 	unsigned int ticks_to_req_ev;
 
+	spinlock_t watch_lock; /* For dealing with watch stuff below. */
+
 	/* How many users are waiting for commands? */
-	atomic_t command_waiters;
+	unsigned int command_waiters;
 
 	/* How many users are waiting for watchdogs? */
-	atomic_t watchdog_waiters;
+	unsigned int watchdog_waiters;
+
+	/* How many users are waiting for message responses? */
+	unsigned int response_waiters;
 
 	/*
 	 * Tells what the lower layer has last been asked to watch for,
-	 * messages and/or watchdogs.  Protected by xmit_msgs_lock.
+	 * messages and/or watchdogs.  Protected by watch_lock.
 	 */
 	unsigned int last_watch_mask;
@@ -938,6 +943,64 @@ static void deliver_err_response(struct ipmi_smi *intf,
 	deliver_local_response(intf, msg);
 }
 
+static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+	unsigned long iflags;
+
+	if (!intf->handlers->set_need_watch)
+		return;
+
+	spin_lock_irqsave(&intf->watch_lock, iflags);
+	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+		intf->response_waiters++;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+		intf->watchdog_waiters++;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+		intf->command_waiters++;
+
+	if ((intf->last_watch_mask & flags) != flags) {
+		intf->last_watch_mask |= flags;
+		intf->handlers->set_need_watch(intf->send_info,
+					       intf->last_watch_mask);
+	}
+	spin_unlock_irqrestore(&intf->watch_lock, iflags);
+}
+
+static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+	unsigned long iflags;
+
+	if (!intf->handlers->set_need_watch)
+		return;
+
+	spin_lock_irqsave(&intf->watch_lock, iflags);
+	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+		intf->response_waiters--;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+		intf->watchdog_waiters--;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+		intf->command_waiters--;
+
+	flags = 0;
+	if (intf->response_waiters)
+		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+	if (intf->watchdog_waiters)
+		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
+	if (intf->command_waiters)
+		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
+
+	if (intf->last_watch_mask != flags) {
+		intf->last_watch_mask = flags;
+		intf->handlers->set_need_watch(intf->send_info,
+					       intf->last_watch_mask);
+	}
+	spin_unlock_irqrestore(&intf->watch_lock, iflags);
+}
+
 /*
  * Find the next sequence number not being used and add the given
  * message with the given timeout to the sequence table.  This must be
@@ -981,6 +1044,7 @@ static int intf_next_seq(struct ipmi_smi *intf,
 		*seq = i;
 		*seqid = intf->seq_table[i].seqid;
 		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 		need_waiter(intf);
 	} else {
 		rv = -EAGAIN;
@@ -1019,6 +1083,7 @@ static int intf_find_seq(struct ipmi_smi *intf,
 		    && (ipmi_addr_equal(addr, &msg->addr))) {
 			*recv_msg = msg;
 			intf->seq_table[seq].inuse = 0;
+			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 			rv = 0;
 		}
 	}
@@ -1080,6 +1145,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
 		struct seq_table *ent = &intf->seq_table[seq];
 
 		ent->inuse = 0;
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 		msg = ent->recv_msg;
 		rv = 0;
 	}
@@ -1091,30 +1157,6 @@ static int intf_err_seq(struct ipmi_smi *intf,
 	return rv;
 }
 
-/* Must be called with xmit_msgs_lock held. */
-static void smi_tell_to_watch(struct ipmi_smi *intf,
-			      unsigned int flags,
-			      struct ipmi_smi_msg *smi_msg)
-{
-	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) {
-		if (!smi_msg)
-			return;
-
-		if (!smi_msg->needs_response)
-			return;
-	}
-
-	if (!intf->handlers->set_need_watch)
-		return;
-
-	if ((intf->last_watch_mask & flags) == flags)
-		return;
-
-	intf->last_watch_mask |= flags;
-	intf->handlers->set_need_watch(intf->send_info,
-				       intf->last_watch_mask);
-}
-
 int ipmi_create_user(unsigned int          if_num,
 		     const struct ipmi_user_hndl *handler,
 		     void                  *handler_data,
@@ -1175,12 +1217,9 @@ int ipmi_create_user(unsigned int          if_num,
 	spin_lock_irqsave(&intf->seq_lock, flags);
 	list_add_rcu(&new_user->link, &intf->users);
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
-	if (handler->ipmi_watchdog_pretimeout) {
+	if (handler->ipmi_watchdog_pretimeout)
 		/* User wants pretimeouts, so make sure to watch for them. */
-		if (atomic_inc_return(&intf->watchdog_waiters) == 1)
-			smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG,
-					  NULL);
-	}
+		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
 	srcu_read_unlock(&ipmi_interfaces_srcu, index);
 	*user = new_user;
 	return 0;
@@ -1251,7 +1290,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
 		user->handler->shutdown(user->handler_data);
 
 	if (user->handler->ipmi_watchdog_pretimeout)
-		atomic_dec(&intf->watchdog_waiters);
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
 
 	if (user->gets_events)
 		atomic_dec(&intf->event_waiters);
@@ -1264,6 +1303,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
 		if (intf->seq_table[i].inuse
 		    && (intf->seq_table[i].recv_msg->user == user)) {
 			intf->seq_table[i].inuse = 0;
+			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
 		}
 	}
@@ -1606,8 +1646,7 @@ int ipmi_register_for_cmd(struct ipmi_user *user,
 		goto out_unlock;
 	}
 
-	if (atomic_inc_return(&intf->command_waiters) == 1)
-		smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS, NULL);
+	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
 
 	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
@@ -1657,7 +1696,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
 	synchronize_rcu();
 	release_ipmi_user(user, index);
 	while (rcvrs) {
-		atomic_dec(&intf->command_waiters);
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
 		rcvr = rcvrs;
 		rcvrs = rcvr->next;
 		kfree(rcvr);
@@ -1785,8 +1824,6 @@ static void smi_send(struct ipmi_smi *intf,
 		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
 	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
 
-	smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, smi_msg);
-
 	if (!run_to_completion)
 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
@@ -1986,9 +2023,6 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
 				ipmb_seq, broadcast,
 				source_address, source_lun);
 
-		/* We will be getting a response in the BMC message queue. */
-		smi_msg->needs_response = true;
-
 		/*
 		 * Copy the message into the recv message data, so we
 		 * can retransmit it later if necessary.
@@ -2176,7 +2210,6 @@ static int i_ipmi_request(struct ipmi_user *user,
 			goto out;
 		}
 	}
-	smi_msg->needs_response = false;
 
 	rcu_read_lock();
 	if (intf->in_shutdown) {
@@ -3390,9 +3423,8 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
 	INIT_LIST_HEAD(&intf->xmit_msgs);
 	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
 	spin_lock_init(&intf->events_lock);
+	spin_lock_init(&intf->watch_lock);
 	atomic_set(&intf->event_waiters, 0);
-	atomic_set(&intf->watchdog_waiters, 0);
-	atomic_set(&intf->command_waiters, 0);
 	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 	INIT_LIST_HEAD(&intf->waiting_events);
 	intf->waiting_events_count = 0;
@@ -4408,8 +4440,6 @@ static void smi_recv_tasklet(unsigned long val)
 		}
 	}
 
-	smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, newmsg);
-
 	if (!run_to_completion)
 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 	if (newmsg)
@@ -4537,7 +4567,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 			      struct list_head *timeouts,
 			      unsigned long timeout_period,
 			      int slot, unsigned long *flags,
-			      unsigned int *watch_mask)
+			      bool *need_timer)
 {
 	struct ipmi_recv_msg *msg;
@@ -4549,13 +4579,14 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 
 	if (timeout_period < ent->timeout) {
 		ent->timeout -= timeout_period;
-		*watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+		*need_timer = true;
 		return;
 	}
 
 	if (ent->retries_left == 0) {
 		/* The message has used all its retries. */
 		ent->inuse = 0;
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 		msg = ent->recv_msg;
 		list_add_tail(&msg->link, timeouts);
 		if (ent->broadcast)
@@ -4568,7 +4599,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 		struct ipmi_smi_msg *smi_msg;
 		/* More retries, send again. */
 
-		*watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+		*need_timer = true;
 
 		/*
 		 * Start with the max timer, set to normal timer after
@@ -4613,20 +4644,20 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 	}
 }
 
-static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
-					 unsigned long timeout_period)
+static bool ipmi_timeout_handler(struct ipmi_smi *intf,
+				 unsigned long timeout_period)
 {
 	struct list_head     timeouts;
 	struct ipmi_recv_msg *msg, *msg2;
 	unsigned long        flags;
 	int                  i;
-	unsigned int         watch_mask = 0;
+	bool                 need_timer = false;
 
 	if (!intf->bmc_registered) {
 		kref_get(&intf->refcount);
 		if (!schedule_work(&intf->bmc_reg_work)) {
 			kref_put(&intf->refcount, intf_free);
-			watch_mask |= IPMI_WATCH_MASK_INTERNAL;
+			need_timer = true;
 		}
 	}
@@ -4646,7 +4677,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
 		check_msg_timeout(intf, &intf->seq_table[i],
 				  &timeouts, timeout_period, i,
-				  &flags, &watch_mask);
+				  &flags, &need_timer);
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
 
 	list_for_each_entry_safe(msg, msg2, &timeouts, link)
@@ -4677,7 +4708,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
 		tasklet_schedule(&intf->recv_tasklet);
 
-	return watch_mask;
+	return need_timer;
 }
 
 static void ipmi_request_event(struct ipmi_smi *intf)
@@ -4697,9 +4728,8 @@ static atomic_t stop_operation;
 static void ipmi_timeout(struct timer_list *unused)
 {
 	struct ipmi_smi *intf;
-	unsigned int watch_mask = 0;
+	bool need_timer = false;
 	int index;
-	unsigned long flags;
 
 	if (atomic_read(&stop_operation))
 		return;
@@ -4712,28 +4742,14 @@ static void ipmi_timeout(struct timer_list *unused)
 			ipmi_request_event(intf);
 			intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 		}
-		watch_mask |= IPMI_WATCH_MASK_INTERNAL;
+		need_timer = true;
 	}
 
-	if (atomic_read(&intf->watchdog_waiters))
-		watch_mask |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
-
-	if (atomic_read(&intf->command_waiters))
-		watch_mask |= IPMI_WATCH_MASK_CHECK_COMMANDS;
-
-	watch_mask |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
-
-	spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
-	if (watch_mask != intf->last_watch_mask &&
-			intf->handlers->set_need_watch)
-		intf->handlers->set_need_watch(intf->send_info,
-					       watch_mask);
-	intf->last_watch_mask = watch_mask;
-	spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+	need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
 	}
 
 	srcu_read_unlock(&ipmi_interfaces_srcu, index);
 
-	if (watch_mask)
+	if (need_timer)
 		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 }
...
@@ -1066,7 +1066,7 @@ static void set_need_watch(void *send_info, unsigned int watch_mask)
 	unsigned long flags;
 	int enable;
 
-	enable = !!(watch_mask & ~IPMI_WATCH_MASK_INTERNAL);
+	enable = !!watch_mask;
 
 	atomic_set(&smi_info->need_watch, enable);
 	spin_lock_irqsave(&smi_info->si_lock, flags);
...
@@ -1129,7 +1129,7 @@ static void ssif_set_need_watch(void *send_info, unsigned int watch_mask)
 	if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
 		timeout = SSIF_WATCH_MSG_TIMEOUT;
-	else if (watch_mask & ~IPMI_WATCH_MASK_INTERNAL)
+	else if (watch_mask)
 		timeout = SSIF_WATCH_WATCHDOG_TIMEOUT;
 
 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
...
@@ -32,14 +32,11 @@ struct ipmi_smi;
 /*
  * Flags for set_check_watch() below.  Tells if the SMI should be
- * waiting for watchdog timeouts, commands and/or messages.  There is
- * also an internal flag for the message handler, SMIs should ignore
- * it.
+ * waiting for watchdog timeouts, commands and/or messages.
  */
-#define IPMI_WATCH_MASK_INTERNAL	(1 << 0)
-#define IPMI_WATCH_MASK_CHECK_MESSAGES	(1 << 1)
-#define IPMI_WATCH_MASK_CHECK_WATCHDOG	(1 << 2)
-#define IPMI_WATCH_MASK_CHECK_COMMANDS	(1 << 3)
+#define IPMI_WATCH_MASK_CHECK_MESSAGES	(1 << 0)
+#define IPMI_WATCH_MASK_CHECK_WATCHDOG	(1 << 1)
+#define IPMI_WATCH_MASK_CHECK_COMMANDS	(1 << 2)
/* /*
* Messages to/from the lower layer. The smi interface will take one * Messages to/from the lower layer. The smi interface will take one
...@@ -66,12 +63,6 @@ struct ipmi_smi_msg { ...@@ -66,12 +63,6 @@ struct ipmi_smi_msg {
int rsp_size; int rsp_size;
unsigned char rsp[IPMI_MAX_MSG_LENGTH]; unsigned char rsp[IPMI_MAX_MSG_LENGTH];
/*
* There should be a response message coming back in the BMC
* message queue.
*/
bool needs_response;
/* /*
* Will be called when the system is done with the message * Will be called when the system is done with the message
* (presumably to free it). * (presumably to free it).
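
With IPMI_WATCH_MASK_INTERNAL gone, the contract for lower layers becomes
simply: any nonzero mask means keep polling, and a pending message
response warrants the fastest poll.  A hedged sketch of a set_need_watch()
consumer under that rule (hypothetical interval constants, loosely
mirroring the ipmi_ssif hunk above; not the actual driver code):

#include <stdbool.h>

#define IPMI_WATCH_MASK_CHECK_MESSAGES	(1 << 0)
#define IPMI_WATCH_MASK_CHECK_WATCHDOG	(1 << 1)
#define IPMI_WATCH_MASK_CHECK_COMMANDS	(1 << 2)

/* Hypothetical poll intervals, for illustration only. */
#define FAST_POLL_MS	10	/* a message response is expected soon */
#define SLOW_POLL_MS	250	/* watchdog/command housekeeping */

static int pick_poll_interval(unsigned int watch_mask, bool *enable)
{
	/* Any nonzero mask enables polling at all. */
	*enable = watch_mask != 0;

	/* Waiting on a response gets the short interval. */
	if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
		return FAST_POLL_MS;
	return SLOW_POLL_MS;
}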