Commit 6fadb494 authored by Takashi Iwai

ALSA: seq: Set upper limit of processed events

Currently the ALSA sequencer core tries to process queued events as
much as possible when they become dispatchable.  If an application
queues a massive number of events to be processed at the very same
timing, the sequencer core would still try to process them all at
once, either in the interrupt context or via some notifier; either
way, this can cause RCU stalls or similar problems.

As a potential workaround for those problems, this patch adds an
upper limit on the number of events to be processed in one batch.
The remaining events are processed in the next batch, so they won't
be lost.

For the time being, the limit is set to 1000 events per queue, which
should be high enough for any normal usage.
Reported-by: Zqiang <qiang.zhang1211@gmail.com>
Reported-by: syzbot+bb950e68b400ab4f65f8@syzkaller.appspotmail.com
Link: https://lore.kernel.org/r/20211102033222.3849-1-qiang.zhang1211@gmail.com
Link: https://lore.kernel.org/r/20211207165146.2888-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
parent 403c5210
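
Before the diff, here is a minimal user-space sketch of the batching
idea described above; it is not the kernel code itself, and the
struct queue type, dispatch_one() helper, and MAX_BATCH constant are
hypothetical stand-ins.  At most MAX_BATCH items are dispatched per
pass, and the caller simply invokes the pass again for whatever
remains, so nothing is lost.

/*
 * Minimal user-space sketch of the capped-batch pattern, assuming a
 * hypothetical queue; dispatch_one() stands in for the real dispatch.
 */
#include <stdio.h>

#define MAX_BATCH 1000

struct queue {
	int pending;			/* items still waiting */
};

static void dispatch_one(struct queue *q)
{
	q->pending--;			/* stand-in for real event dispatch */
}

/* Process one batch; returns nonzero if items remain for a next pass. */
static int process_batch(struct queue *q)
{
	int processed = 0;

	while (q->pending > 0) {
		dispatch_one(q);
		if (++processed >= MAX_BATCH)
			break;		/* the rest goes to the next batch */
	}
	return q->pending > 0;
}

int main(void)
{
	struct queue q = { .pending = 2500 };
	int passes = 1;

	while (process_batch(&q))
		passes++;
	printf("drained in %d passes\n", passes);	/* prints 3 */
	return 0;
}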
@@ -235,12 +235,15 @@ struct snd_seq_queue *snd_seq_queue_find_name(char *name)
 
 /* -------------------------------------------------------- */
 
+#define MAX_CELL_PROCESSES_IN_QUEUE	1000
+
 void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
 {
 	unsigned long flags;
 	struct snd_seq_event_cell *cell;
 	snd_seq_tick_time_t cur_tick;
 	snd_seq_real_time_t cur_time;
+	int processed = 0;
 
 	if (q == NULL)
 		return;
@@ -263,6 +266,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
 		if (!cell)
 			break;
 		snd_seq_dispatch_event(cell, atomic, hop);
+		if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
+			goto out; /* the rest processed at the next batch */
 	}
 
 	/* Process time queue... */
@@ -272,14 +277,19 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
 		if (!cell)
 			break;
 		snd_seq_dispatch_event(cell, atomic, hop);
+		if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
+			goto out; /* the rest processed at the next batch */
 	}
 
+ out:
 	/* free lock */
 	spin_lock_irqsave(&q->check_lock, flags);
 	if (q->check_again) {
 		q->check_again = 0;
-		spin_unlock_irqrestore(&q->check_lock, flags);
-		goto __again;
+		if (processed < MAX_CELL_PROCESSES_IN_QUEUE) {
+			spin_unlock_irqrestore(&q->check_lock, flags);
+			goto __again;
+		}
 	}
 	q->check_blocked = 0;
 	spin_unlock_irqrestore(&q->check_lock, flags);
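
Note how the cap interacts with the re-check logic at the end of the
diff: a pass that hit the limit clears check_blocked and returns
without taking the __again loop, leaving the remaining events to the
next invocation of the check (e.g. the next timer tick or enqueue).
Below is a user-space pthreads sketch of that check_blocked/check_again
pattern; all names are hypothetical stand-ins for the kernel code, and
synchronization of ->pending itself is elided for brevity.

/*
 * User-space pthreads sketch of the check_blocked/check_again pattern
 * from snd_seq_check_queue() above; all names are hypothetical
 * stand-ins, and locking of ->pending itself is elided.
 */
#include <pthread.h>
#include <stdbool.h>

#define MAX_BATCH 1000

struct seq_queue {
	pthread_mutex_t check_lock;
	bool check_blocked;	/* a pass is already in progress */
	bool check_again;	/* more work arrived during that pass */
	int pending;		/* events waiting to be dispatched */
};

static void check_queue(struct seq_queue *q)
{
	int processed = 0;	/* the cap covers one whole call */

	pthread_mutex_lock(&q->check_lock);
	if (q->check_blocked) {
		/* Another pass runs already; ask it to loop once more. */
		q->check_again = true;
		pthread_mutex_unlock(&q->check_lock);
		return;
	}
	q->check_blocked = true;
	pthread_mutex_unlock(&q->check_lock);

again:
	while (q->pending > 0) {
		q->pending--;		/* stand-in for event dispatch */
		if (++processed >= MAX_BATCH)
			break;		/* leave the rest for the next batch */
	}

	pthread_mutex_lock(&q->check_lock);
	if (q->check_again) {
		q->check_again = false;
		if (processed < MAX_BATCH) {	/* cap not hit: loop now */
			pthread_mutex_unlock(&q->check_lock);
			goto again;
		}
	}
	q->check_blocked = false;
	pthread_mutex_unlock(&q->check_lock);
}

int main(void)
{
	struct seq_queue q = {
		.check_lock = PTHREAD_MUTEX_INITIALIZER,
		.pending = 2500,
	};

	while (q.pending > 0)
		check_queue(&q);	/* each call drains one batch */
	return 0;
}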