Home Home > GIT Browse
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@penguin.transmeta.com>2002-03-11 21:33:50 -0800
committerLinus Torvalds <torvalds@penguin.transmeta.com>2002-03-11 21:33:50 -0800
commit044996c2809f4c2e7747644a9dbd85668c2bde1b (patch)
treee19de0fc4919cade30340effd3ed667fc5001455
parenteb86b60f8e6556bafc0ff4d8843bb6fcf57c8516 (diff)
parent97cab050be714a476a5e516de71de5ca61238e23 (diff)
Merge master.kernel.org:/home/davem/BK/sched-2.5
into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
-rw-r--r--kernel/sched.c46
1 file changed, 34 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 45e3b6103360..73110e6a8abc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -140,6 +140,7 @@ struct prio_array {
*/
struct runqueue {
spinlock_t lock;
+ spinlock_t frozen;
unsigned long nr_running, nr_switches, expired_timestamp;
task_t *curr, *idle;
prio_array_t *active, *expired, arrays[2];
@@ -400,7 +401,7 @@ void sched_exit(task_t * p)
#if CONFIG_SMP || CONFIG_PREEMPT
asmlinkage void schedule_tail(void)
{
- spin_unlock_irq(&this_rq()->lock);
+ spin_unlock_irq(&this_rq()->frozen);
}
#endif
@@ -518,12 +519,14 @@ static void load_balance(runqueue_t *this_rq, int idle)
busiest = NULL;
max_load = 1;
for (i = 0; i < smp_num_cpus; i++) {
- rq_src = cpu_rq(cpu_logical_map(i));
- if (idle || (rq_src->nr_running < this_rq->prev_nr_running[i]))
+ int logical = cpu_logical_map(i);
+
+ rq_src = cpu_rq(logical);
+ if (idle || (rq_src->nr_running < this_rq->prev_nr_running[logical]))
load = rq_src->nr_running;
else
- load = this_rq->prev_nr_running[i];
- this_rq->prev_nr_running[i] = rq_src->nr_running;
+ load = this_rq->prev_nr_running[logical];
+ this_rq->prev_nr_running[logical] = rq_src->nr_running;
if ((load > max_load) && (rq_src != this_rq)) {
busiest = rq_src;
@@ -590,7 +593,7 @@ skip_queue:
#define CAN_MIGRATE_TASK(p,rq,this_cpu) \
((jiffies - (p)->sleep_timestamp > cache_decay_ticks) && \
((p) != (rq)->curr) && \
- (tmp->cpus_allowed & (1 << (this_cpu))))
+ ((p)->cpus_allowed & (1 << (this_cpu))))
if (!CAN_MIGRATE_TASK(tmp, busiest, this_cpu)) {
curr = curr->next;
@@ -808,16 +811,22 @@ switch_tasks:
if (likely(prev != next)) {
rq->nr_switches++;
rq->curr = next;
+ spin_lock(&rq->frozen);
+ spin_unlock(&rq->lock);
+
context_switch(prev, next);
+
/*
* The runqueue pointer might be from another CPU
* if the new task was last running on a different
* CPU - thus re-load it.
*/
- barrier();
+ mb();
rq = this_rq();
+ spin_unlock_irq(&rq->frozen);
+ } else {
+ spin_unlock_irq(&rq->lock);
}
- spin_unlock_irq(&rq->lock);
reacquire_kernel_lock(current);
preempt_enable_no_resched();
@@ -1463,6 +1472,7 @@ void __init sched_init(void)
rq->active = rq->arrays;
rq->expired = rq->arrays + 1;
spin_lock_init(&rq->lock);
+ spin_lock_init(&rq->frozen);
INIT_LIST_HEAD(&rq->migration_queue);
for (j = 0; j < 2; j++) {
@@ -1649,19 +1659,31 @@ repeat:
void __init migration_init(void)
{
+ unsigned long tmp, orig_cache_decay_ticks;
int cpu;
- for (cpu = 0; cpu < smp_num_cpus; cpu++)
+ tmp = 0;
+ for (cpu = 0; cpu < smp_num_cpus; cpu++) {
if (kernel_thread(migration_thread, NULL,
CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
BUG();
+ tmp |= (1UL << cpu_logical_map(cpu));
+ }
+
+ migration_mask = tmp;
+
+ orig_cache_decay_ticks = cache_decay_ticks;
+ cache_decay_ticks = 0;
- migration_mask = (1 << smp_num_cpus) - 1;
+ for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+ int logical = cpu_logical_map(cpu);
- for (cpu = 0; cpu < smp_num_cpus; cpu++)
- while (!cpu_rq(cpu)->migration_thread)
+ while (!cpu_rq(logical)->migration_thread)
schedule_timeout(2);
+ }
if (migration_mask)
BUG();
+
+ cache_decay_ticks = orig_cache_decay_ticks;
}
#endif