nexedi / linux · Commits

Commit d58a247c
Authored Jun 14, 2002 by Ingo Molnar
- revert the raw spinlock usage - it's too dangerous and volatile.
Parent: 873cbfcf

Showing 1 changed file with 5 additions and 21 deletions
kernel/sched.c  +5 −21

@@ -156,12 +156,6 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
  * task_rq_lock - lock the runqueue a given task resides on and disable
  * interrupts. Note the ordering: we can safely lookup the task_rq without
  * explicitly disabling preemption.
- *
- * WARNING: to squeeze out a few more cycles we do not disable preemption
- * explicitly (or implicitly), we just keep interrupts disabled. This means
- * that within task_rq_lock/unlock sections you must be careful
- * about locking/unlocking spinlocks, since they could cause an unexpected
- * preemption.
  */
 static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 {
@@ -170,9 +164,9 @@ static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 repeat_lock_task:
 	local_irq_save(*flags);
 	rq = task_rq(p);
-	_raw_spin_lock(&rq->lock);
+	spin_lock(&rq->lock);
 	if (unlikely(rq != task_rq(p))) {
-		_raw_spin_unlock_irqrestore(&rq->lock, *flags);
+		spin_unlock_irqrestore(&rq->lock, *flags);
 		goto repeat_lock_task;
 	}
 	return rq;
@@ -180,8 +174,7 @@ static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
 {
-	_raw_spin_unlock_irqrestore(&rq->lock, *flags);
-	preempt_check_resched();
+	spin_unlock_irqrestore(&rq->lock, *flags);
 }
 
 /*
@@ -289,15 +282,8 @@ static inline void resched_task(task_t *p)
 	nrpolling |= test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
 
 	if (!need_resched && !nrpolling && (p->thread_info->cpu != smp_processor_id()))
-		/*
-		 * NOTE: smp_send_reschedule() can be called from
-		 * spinlocked sections which do not have an elevated
-		 * preemption count. So the code either has to avoid
-		 * spinlocks, or has to put preempt_disable() and
-		 * preempt_enable_no_resched() around the code.
-		 */
 		smp_send_reschedule(p->thread_info->cpu);
-	preempt_enable_no_resched();
+	preempt_enable();
 #else
 	set_tsk_need_resched(p);
 #endif
@@ -348,10 +334,8 @@ void wait_task_inactive(task_t * p)
  */
 void kick_if_running(task_t * p)
 {
-	if (p == task_rq(p)->curr) {
+	if (p == task_rq(p)->curr)
 		resched_task(p);
-		preempt_check_resched();
-	}
 }
 
 #endif
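Why the plain locking calls are the safer choice here: on a preemptible kernel the ordinary spin_lock()/spin_unlock() pair pins the preempt count around the whole critical section, while the _raw_ calls only manipulate the lock word itself. A rough, hypothetical sketch of that difference (assuming CONFIG_PREEMPT; the sketch_ names are made up, and the real macros in include/linux/spinlock.h differ in detail):

/* Hypothetical sketch -- plain API keeps preemption off for the section. */
#define sketch_spin_lock(lock)		\
do {					\
	preempt_disable();		\
	_raw_spin_lock(lock);		\
} while (0)

#define sketch_spin_unlock(lock)	\
do {					\
	_raw_spin_unlock(lock);		\
	preempt_enable();	/* rescheduling may happen right here */ \
} while (0)

/*
 * The bare _raw_ calls reverted by this commit leave the preempt count
 * untouched, so taking or releasing another (plain) spinlock inside the
 * section can preempt the task while it still holds rq->lock -- the hazard
 * the removed WARNING comment describes.
 */

This is also why task_rq_unlock() no longer needs an explicit preempt_check_resched(): on a preemptible kernel, the preempt_enable() folded into spin_unlock_irqrestore() already performs that check.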