Commit 74d47d75 authored by Heiner Kallweit, committed by Mauro Carvalho Chehab

[media] rc: refactor raw handler kthread

I think we can get rid of the spinlock protecting the kthread from being
interrupted by a wakeup in certain parts.
Even with the current implementation of the kthread the only lost wakeup
scenario could happen if the wakeup occurs between the kfifo_len check
and setting the state to TASK_INTERRUPTIBLE.

In the changed version we could lose a wakeup if it occurs between
processing the fifo content and setting the state to TASK_INTERRUPTIBLE.
This scenario is covered by an additional check for available events in
the fifo and setting the state to TASK_RUNNING in this case.

In addition the changed version flushes the kfifo before ending
when the kthread is stopped.

With this patch we gain:
- Get rid of the spinlock
- Simplify code
- Don't grab / release the mutex for each individual event but just once
  for the complete fifo content. This reduces overhead if a driver e.g.
  triggers processing after writing the content of a hw fifo to the kfifo.
Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
Signed-off-by: Sean Young <sean@mess.org>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
parent 0cffd631
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#define MAX_IR_EVENT_SIZE 512 #define MAX_IR_EVENT_SIZE 512
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/spinlock.h>
#include <media/rc-core.h> #include <media/rc-core.h>
struct ir_raw_handler { struct ir_raw_handler {
...@@ -37,7 +36,6 @@ struct ir_raw_handler { ...@@ -37,7 +36,6 @@ struct ir_raw_handler {
struct ir_raw_event_ctrl { struct ir_raw_event_ctrl {
struct list_head list; /* to keep track of raw clients */ struct list_head list; /* to keep track of raw clients */
struct task_struct *thread; struct task_struct *thread;
spinlock_t lock;
/* fifo for the pulse/space durations */ /* fifo for the pulse/space durations */
DECLARE_KFIFO(kfifo, struct ir_raw_event, MAX_IR_EVENT_SIZE); DECLARE_KFIFO(kfifo, struct ir_raw_event, MAX_IR_EVENT_SIZE);
ktime_t last_event; /* when last event occurred */ ktime_t last_event; /* when last event occurred */
......
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/kmod.h> #include <linux/kmod.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/freezer.h>
#include "rc-core-priv.h" #include "rc-core-priv.h"
/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */ /* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
...@@ -34,32 +33,26 @@ static int ir_raw_event_thread(void *data) ...@@ -34,32 +33,26 @@ static int ir_raw_event_thread(void *data)
struct ir_raw_handler *handler; struct ir_raw_handler *handler;
struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data; struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
while (!kthread_should_stop()) { while (1) {
mutex_lock(&ir_raw_handler_lock);
spin_lock_irq(&raw->lock); while (kfifo_out(&raw->kfifo, &ev, 1)) {
list_for_each_entry(handler, &ir_raw_handler_list, list)
if (!kfifo_len(&raw->kfifo)) { if (raw->dev->enabled_protocols &
set_current_state(TASK_INTERRUPTIBLE); handler->protocols || !handler->protocols)
handler->decode(raw->dev, ev);
if (kthread_should_stop()) raw->prev_ev = ev;
set_current_state(TASK_RUNNING);
spin_unlock_irq(&raw->lock);
schedule();
continue;
} }
mutex_unlock(&ir_raw_handler_lock);
if(!kfifo_out(&raw->kfifo, &ev, 1)) set_current_state(TASK_INTERRUPTIBLE);
dev_err(&raw->dev->dev, "IR event FIFO is empty!\n");
spin_unlock_irq(&raw->lock);
mutex_lock(&ir_raw_handler_lock); if (kthread_should_stop()) {
list_for_each_entry(handler, &ir_raw_handler_list, list) __set_current_state(TASK_RUNNING);
if (raw->dev->enabled_protocols & handler->protocols || break;
!handler->protocols) } else if (!kfifo_is_empty(&raw->kfifo))
handler->decode(raw->dev, ev); set_current_state(TASK_RUNNING);
raw->prev_ev = ev;
mutex_unlock(&ir_raw_handler_lock); schedule();
} }
return 0; return 0;
...@@ -218,14 +211,10 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle); ...@@ -218,14 +211,10 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
*/ */
void ir_raw_event_handle(struct rc_dev *dev) void ir_raw_event_handle(struct rc_dev *dev)
{ {
unsigned long flags;
if (!dev->raw) if (!dev->raw)
return; return;
spin_lock_irqsave(&dev->raw->lock, flags);
wake_up_process(dev->raw->thread); wake_up_process(dev->raw->thread);
spin_unlock_irqrestore(&dev->raw->lock, flags);
} }
EXPORT_SYMBOL_GPL(ir_raw_event_handle); EXPORT_SYMBOL_GPL(ir_raw_event_handle);
...@@ -269,7 +258,6 @@ int ir_raw_event_register(struct rc_dev *dev) ...@@ -269,7 +258,6 @@ int ir_raw_event_register(struct rc_dev *dev)
dev->change_protocol = change_protocol; dev->change_protocol = change_protocol;
INIT_KFIFO(dev->raw->kfifo); INIT_KFIFO(dev->raw->kfifo);
spin_lock_init(&dev->raw->lock);
dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw, dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
"rc%u", dev->minor); "rc%u", dev->minor);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment