author     Jiri Slaby <jslaby@suse.cz>   2011-07-09 11:17:46 +0200
committer  Jiri Slaby <jslaby@suse.cz>   2011-07-09 11:17:46 +0200
commit     e95707dc90da6c7a3ad2385e0d0e110cea488091 (patch)
tree       9bfd8a9a83a859f8c4f29322708c176c32b8c1ed
parent     56e99dabe82b4e0edb8c336cf9e74160fb86577a (diff)
- Delete patches.fixes/x86_mtrr_stop_machine_1_2.patch.
- Delete patches.fixes/x86_mtrr_use_stop_machine_2_2.patch. They were superseded by other patches in aa3d6e2b140aac24a432f830d30047b1842aed0b.
-rw-r--r--  patches.fixes/x86_mtrr_stop_machine_1_2.patch      183
-rw-r--r--  patches.fixes/x86_mtrr_use_stop_machine_2_2.patch  231
2 files changed, 0 insertions, 414 deletions
diff --git a/patches.fixes/x86_mtrr_stop_machine_1_2.patch b/patches.fixes/x86_mtrr_stop_machine_1_2.patch
deleted file mode 100644
index fde706861f..0000000000
--- a/patches.fixes/x86_mtrr_stop_machine_1_2.patch
+++ /dev/null
@@ -1,183 +0,0 @@
-From: Suresh Siddha <suresh.b.siddha@intel.com>
-Subject: stop_machine: enable __stop_machine() to be called from the cpu online path
-Patch-Mainline: Submitted for 3.0 and stable kernels
-References: bnc#672008
-
-Signed-off-by: Thomas Renninger <trenn@suse.de>
-
-Currently the stop machine infrastructure can only be called from a cpu that
-is online. But for !CONFIG_SMP, we do allow calling __stop_machine() before
-the cpu is online.
-
-x86, for example, requires the stop machine infrastructure in the cpu online
-path and currently implements its own stop machine (using
-stop_one_cpu_nowait()) for MTRR initialization there.
-
-Enhance __stop_machine() so that it can be called before the cpu is online.
-This paves the way for code consolidation and addresses potential deadlocks
-caused by multiple mechanisms doing a system-wide rendezvous.
-
-This will also address the behavioral differences of __stop_machine()
-between SMP and UP builds.
-
-Also mark __stop_cpus() static; no one else uses it.
-
-Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
-Cc: stable@kernel.org # v2.6.35+
----
- kernel/stop_machine.c | 58 ++++++++++++++++++++++++++++++++++++++++++--------
- 1 file changed, 49 insertions(+), 9 deletions(-)
-
-Index: linux-2.6-tip/kernel/stop_machine.c
-===================================================================
---- linux-2.6-tip.orig/kernel/stop_machine.c
-+++ linux-2.6-tip/kernel/stop_machine.c
-@@ -28,6 +28,7 @@
- struct cpu_stop_done {
- atomic_t nr_todo; /* nr left to execute */
- bool executed; /* actually executed? */
-+ bool offline_ctxt; /* stop_cpu from offline ctxt */
- int ret; /* collected return value */
- struct completion completion; /* fired if nr_todo reaches 0 */
- };
-@@ -47,15 +48,32 @@ static void cpu_stop_init_done(struct cp
- memset(done, 0, sizeof(*done));
- atomic_set(&done->nr_todo, nr_todo);
- init_completion(&done->completion);
-+ done->offline_ctxt = !percpu_read(cpu_stopper.enabled);
-+}
-+
-+static inline void cpu_stop_wait_for_completion(struct cpu_stop_done *done)
-+{
-+ if (!done->offline_ctxt)
-+ wait_for_completion(&done->completion);
-+ else {
-+ /*
-+ * If the calling cpu is not online, then we can't afford to
-+ * sleep, so poll till the work is completed on the target
-+ * cpu's.
-+ */
-+ while (atomic_read(&done->nr_todo))
-+ cpu_relax();
-+ }
- }
-
- /* signal completion unless @done is NULL */
- static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
- {
- if (done) {
-+ bool offline_ctxt = done->offline_ctxt;
- if (executed)
- done->executed = true;
-- if (atomic_dec_and_test(&done->nr_todo))
-+ if (atomic_dec_and_test(&done->nr_todo) && !offline_ctxt)
- complete(&done->completion);
- }
- }
-@@ -108,7 +126,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s
-
- cpu_stop_init_done(&done, 1);
- cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
-- wait_for_completion(&done.completion);
-+ cpu_stop_wait_for_completion(&done);
- return done.executed ? done.ret : -ENOENT;
- }
-
-@@ -136,20 +154,24 @@ void stop_one_cpu_nowait(unsigned int cp
- static DEFINE_MUTEX(stop_cpus_mutex);
- static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
-
-+static
- int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
- {
-+ int online = percpu_read(cpu_stopper.enabled);
- struct cpu_stop_work *work;
- struct cpu_stop_done done;
-+ unsigned int weight = 0;
- unsigned int cpu;
-
- /* initialize works and done */
-- for_each_cpu(cpu, cpumask) {
-+ for_each_cpu_and(cpu, cpumask, cpu_online_mask) {
- work = &per_cpu(stop_cpus_work, cpu);
- work->fn = fn;
- work->arg = arg;
- work->done = &done;
-+ weight++;
- }
-- cpu_stop_init_done(&done, cpumask_weight(cpumask));
-+ cpu_stop_init_done(&done, weight);
-
- /*
- * Disable preemption while queueing to avoid getting
-@@ -157,12 +179,19 @@ int __stop_cpus(const struct cpumask *cp
- * to enter @fn which can lead to deadlock.
- */
- preempt_disable();
-- for_each_cpu(cpu, cpumask)
-+ for_each_cpu_and(cpu, cpumask, cpu_online_mask)
- cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
- &per_cpu(stop_cpus_work, cpu));
-+
-+ /*
-+ * This cpu is not yet online. If @fn needs to be run on this
-+ * cpu, run it now.
-+ */
-+ if (!online && cpu_isset(smp_processor_id(), *cpumask))
-+ fn(arg);
- preempt_enable();
-
-- wait_for_completion(&done.completion);
-+ cpu_stop_wait_for_completion(&done);
- return done.executed ? done.ret : -ENOENT;
- }
-
-@@ -431,6 +460,7 @@ static int stop_machine_cpu_stop(void *d
- struct stop_machine_data *smdata = data;
- enum stopmachine_state curstate = STOPMACHINE_NONE;
- int cpu = smp_processor_id(), err = 0;
-+ unsigned long flags = 0;
- bool is_active;
-
- if (!smdata->active_cpus)
-@@ -446,7 +476,7 @@ static int stop_machine_cpu_stop(void *d
- curstate = smdata->state;
- switch (curstate) {
- case STOPMACHINE_DISABLE_IRQ:
-- local_irq_disable();
-+ local_irq_save(flags);
- hard_irq_disable();
- break;
- case STOPMACHINE_RUN:
-@@ -460,7 +490,7 @@ static int stop_machine_cpu_stop(void *d
- }
- } while (curstate != STOPMACHINE_EXIT);
-
-- local_irq_enable();
-+ local_irq_restore(flags);
- return err;
- }
-
-@@ -470,9 +500,19 @@ int __stop_machine(int (*fn)(void *), vo
- .num_threads = num_online_cpus(),
- .active_cpus = cpus };
-
-+ /* Include the calling cpu that might not be online yet. */
-+ if (!percpu_read(cpu_stopper.enabled))
-+ smdata.num_threads++;
-+
- /* Set the initial state and stop all online cpus. */
- set_state(&smdata, STOPMACHINE_PREPARE);
-- return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
-+
-+ if (percpu_read(cpu_stopper.enabled))
-+ return stop_cpus(cpu_online_mask, stop_machine_cpu_stop,
-+ &smdata);
-+ else
-+ return __stop_cpus(cpu_all_mask, stop_machine_cpu_stop,
-+ &smdata);
- }
-
- int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
-
-
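The patch deleted above enables __stop_machine() from a cpu that is not yet
online by letting the initiator poll an atomic counter instead of sleeping on a
completion. A minimal userspace sketch of that wait-or-poll pattern follows; it
is an analogue only, using pthreads and C11 atomics, and struct stop_done plus
the function names are hypothetical, not the kernel's:

/*
 * Userspace analogue of the offline-context completion pattern: a caller
 * that must not sleep polls an atomic "nr_todo" counter, while an online
 * caller blocks on a condition variable (the struct completion stand-in).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>
#include <stdio.h>

struct stop_done {
        atomic_int nr_todo;            /* workers left to finish */
        bool offline_ctxt;             /* caller cannot sleep -> poll */
        pthread_mutex_t lock;
        pthread_cond_t completion;
};

static void stop_signal_done(struct stop_done *d)
{
        /* The last worker wakes the sleeping waiter, if there is one. */
        if (atomic_fetch_sub(&d->nr_todo, 1) == 1 && !d->offline_ctxt) {
                pthread_mutex_lock(&d->lock);
                pthread_cond_signal(&d->completion);
                pthread_mutex_unlock(&d->lock);
        }
}

static void stop_wait_for_done(struct stop_done *d)
{
        if (!d->offline_ctxt) {
                pthread_mutex_lock(&d->lock);
                while (atomic_load(&d->nr_todo))
                        pthread_cond_wait(&d->completion, &d->lock);
                pthread_mutex_unlock(&d->lock);
        } else {
                /* Offline context: busy-poll, the cpu_relax() analogue. */
                while (atomic_load(&d->nr_todo))
                        sched_yield();
        }
}

static void *worker(void *arg)
{
        stop_signal_done(arg);
        return NULL;
}

int main(void)
{
        struct stop_done d = { .offline_ctxt = true };
        pthread_t t[4];

        atomic_store(&d.nr_todo, 4);
        pthread_mutex_init(&d.lock, NULL);
        pthread_cond_init(&d.completion, NULL);

        for (long i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, worker, &d);
        stop_wait_for_done(&d);
        for (long i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        printf("all workers done\n");
        return 0;
}

With offline_ctxt set, the waiter never sleeps, mirroring how the patch's
cpu_stop_wait_for_completion() spins with cpu_relax() when cpu_stopper.enabled
has not been set yet.
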
diff --git a/patches.fixes/x86_mtrr_use_stop_machine_2_2.patch b/patches.fixes/x86_mtrr_use_stop_machine_2_2.patch
deleted file mode 100644
index 0c870e0d02..0000000000
--- a/patches.fixes/x86_mtrr_use_stop_machine_2_2.patch
+++ /dev/null
@@ -1,231 +0,0 @@
-From: Suresh Siddha <suresh.b.siddha@intel.com>
-Subject: x86, mtrr: use __stop_machine() for doing MTRR rendezvous
-Patch-Mainline: Submitted for 3.0 and stable kernels
-References: bnc#672008
-
-Signed-off-by: Thomas Renninger <trenn@suse.de>
-
-The MTRR rendezvous sequence using stop_one_cpu_nowait() can potentially
-happen in parallel with another system-wide rendezvous using
-stop_machine(). This can lead to deadlock: the order in which the works
-are queued can differ between cpus, so some cpus end up running the first
-rendezvous handler while others run the second one, and each set waits for
-the other set to join its system-wide rendezvous.
-
-The MTRR rendezvous sequence was not implemented using stop_machine() before,
-as it gets called both from process context as well as from the cpu online
-path (where the cpu has not yet come online and interrupts are disabled, etc.).
-
-Now that __stop_machine() works even when the calling cpu is not online,
-use __stop_machine() to implement the MTRR rendezvous sequence. This
-consolidates the code and avoids the above-mentioned deadlock.
-
-Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
-Cc: stable@kernel.org # v2.6.35+
----
- arch/x86/kernel/cpu/mtrr/main.c | 154 +++++++---------------------------------
- 1 file changed, 27 insertions(+), 127 deletions(-)
-
-Index: linux-2.6-tip/arch/x86/kernel/cpu/mtrr/main.c
-===================================================================
---- linux-2.6-tip.orig/arch/x86/kernel/cpu/mtrr/main.c
-+++ linux-2.6-tip/arch/x86/kernel/cpu/mtrr/main.c
-@@ -137,18 +137,15 @@ static void __init init_table(void)
- }
-
- struct set_mtrr_data {
-- atomic_t count;
-- atomic_t gate;
- unsigned long smp_base;
- unsigned long smp_size;
- unsigned int smp_reg;
- mtrr_type smp_type;
- };
-
--static DEFINE_PER_CPU(struct cpu_stop_work, mtrr_work);
--
- /**
-- * mtrr_work_handler - Synchronisation handler. Executed by "other" CPUs.
-+ * mtrr_work_handler - Work done in the synchronisation handler. Executed by
-+ * all the CPUs.
- * @info: pointer to mtrr configuration data
- *
- * Returns nothing.
-@@ -157,35 +154,26 @@ static int mtrr_work_handler(void *info)
- {
- #ifdef CONFIG_SMP
- struct set_mtrr_data *data = info;
-- unsigned long flags;
--
-- atomic_dec(&data->count);
-- while (!atomic_read(&data->gate))
-- cpu_relax();
--
-- local_irq_save(flags);
-
-- atomic_dec(&data->count);
-- while (atomic_read(&data->gate))
-- cpu_relax();
--
-- /* The master has cleared me to execute */
-+ /*
-+ * We use this same function to initialize the mtrrs during boot,
-+ * resume, runtime cpu online and on an explicit request to set a
-+ * specific MTRR.
-+ *
-+ * During boot or suspend, the state of the boot cpu's mtrrs has been
-+ * saved, and we want to replicate that across all the cpus that come
-+ * online (either at the end of boot or resume or during a runtime cpu
-+ * online). If we're doing that, @reg is set to something special and on
-+ * all the cpu's we do mtrr_if->set_all() (On the logical cpu that
-+ * started the boot/resume sequence, this might be a duplicate
-+ * set_all()).
-+ */
- if (data->smp_reg != ~0U) {
- mtrr_if->set(data->smp_reg, data->smp_base,
- data->smp_size, data->smp_type);
-- } else if (mtrr_aps_delayed_init) {
-- /*
-- * Initialize the MTRRs inaddition to the synchronisation.
-- */
-+ } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
- mtrr_if->set_all();
- }
--
-- atomic_dec(&data->count);
-- while (!atomic_read(&data->gate))
-- cpu_relax();
--
-- atomic_dec(&data->count);
-- local_irq_restore(flags);
- #endif
- return 0;
- }
-@@ -223,20 +211,11 @@ static inline int types_compatible(mtrr_
- * 14. Wait for buddies to catch up
- * 15. Enable interrupts.
- *
-- * What does that mean for us? Well, first we set data.count to the number
-- * of CPUs. As each CPU announces that it started the rendezvous handler by
-- * decrementing the count, We reset data.count and set the data.gate flag
-- * allowing all the cpu's to proceed with the work. As each cpu disables
-- * interrupts, it'll decrement data.count once. We wait until it hits 0 and
-- * proceed. We clear the data.gate flag and reset data.count. Meanwhile, they
-- * are waiting for that flag to be cleared. Once it's cleared, each
-- * CPU goes through the transition of updating MTRRs.
-- * The CPU vendors may each do it differently,
-- * so we call mtrr_if->set() callback and let them take care of it.
-- * When they're done, they again decrement data->count and wait for data.gate
-- * to be set.
-- * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag
-- * Everyone then enables interrupts and we all continue on.
-+ * What does that mean for us? Well, __stop_machine() will ensure that
-+ * the rendezvous handler is started on each CPU. And in lockstep they
-+ * do the state transition of disabling interrupts, updating MTRR's
-+ * (the CPU vendors may each do it differently, so we call mtrr_if->set()
-+ * callback and let them take care of it.) and enabling interrupts.
- *
- * Note that the mechanism is the same for UP systems, too; all the SMP stuff
- * becomes nops.
-@@ -244,92 +223,13 @@ static inline int types_compatible(mtrr_
- static void
- set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
- {
-- struct set_mtrr_data data;
-- unsigned long flags;
-- int cpu;
--
-- preempt_disable();
--
-- data.smp_reg = reg;
-- data.smp_base = base;
-- data.smp_size = size;
-- data.smp_type = type;
-- atomic_set(&data.count, num_booting_cpus() - 1);
--
-- /* Make sure data.count is visible before unleashing other CPUs */
-- smp_wmb();
-- atomic_set(&data.gate, 0);
--
-- /* Start the ball rolling on other CPUs */
-- for_each_online_cpu(cpu) {
-- struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);
--
-- if (cpu == smp_processor_id())
-- continue;
--
-- stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
-- }
--
--
-- while (atomic_read(&data.count))
-- cpu_relax();
--
-- /* Ok, reset count and toggle gate */
-- atomic_set(&data.count, num_booting_cpus() - 1);
-- smp_wmb();
-- atomic_set(&data.gate, 1);
--
-- local_irq_save(flags);
--
-- while (atomic_read(&data.count))
-- cpu_relax();
--
-- /* Ok, reset count and toggle gate */
-- atomic_set(&data.count, num_booting_cpus() - 1);
-- smp_wmb();
-- atomic_set(&data.gate, 0);
--
-- /* Do our MTRR business */
--
-- /*
-- * HACK!
-- *
-- * We use this same function to initialize the mtrrs during boot,
-- * resume, runtime cpu online and on an explicit request to set a
-- * specific MTRR.
-- *
-- * During boot or suspend, the state of the boot cpu's mtrrs has been
-- * saved, and we want to replicate that across all the cpus that come
-- * online (either at the end of boot or resume or during a runtime cpu
-- * online). If we're doing that, @reg is set to something special and on
-- * this cpu we still do mtrr_if->set_all(). During boot/resume, this
-- * is unnecessary if at this point we are still on the cpu that started
-- * the boot/resume sequence. But there is no guarantee that we are still
-- * on the same cpu. So we do mtrr_if->set_all() on this cpu aswell to be
-- * sure that we are in sync with everyone else.
-- */
-- if (reg != ~0U)
-- mtrr_if->set(reg, base, size, type);
-- else
-- mtrr_if->set_all();
--
-- /* Wait for the others */
-- while (atomic_read(&data.count))
-- cpu_relax();
--
-- atomic_set(&data.count, num_booting_cpus() - 1);
-- smp_wmb();
-- atomic_set(&data.gate, 1);
--
-- /*
-- * Wait here for everyone to have seen the gate change
-- * So we're the last ones to touch 'data'
-- */
-- while (atomic_read(&data.count))
-- cpu_relax();
-+ struct set_mtrr_data data = { .smp_reg = reg,
-+ .smp_base = base,
-+ .smp_size = size,
-+ .smp_type = type
-+ };
-
-- local_irq_restore(flags);
-- preempt_enable();
-+ __stop_machine(mtrr_work_handler, &data, cpu_callout_mask);
- }
-
- /**
-
-
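The second deleted patch replaces the hand-rolled count/gate protocol in
set_mtrr() with a single __stop_machine() rendezvous. The benefit is easier to
see with a small model of the lockstep state machine the generic handler walks
through; the sketch below is a userspace analogue only (pthreads and C11
atomics, hypothetical names), not the kernel implementation:

/*
 * Userspace model of the lockstep state machine in stop_machine_cpu_stop():
 * every "cpu" advances PREPARE -> DISABLE_IRQ -> RUN -> EXIT together, and
 * the last thread to acknowledge a state moves everyone to the next one.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <sched.h>
#include <stdio.h>

enum sm_state { SM_NONE, SM_PREPARE, SM_DISABLE_IRQ, SM_RUN, SM_EXIT };

#define NR_CPUS_SIM 4

static atomic_int cur_state = SM_PREPARE;   /* set by the initiator */
static atomic_int acked;                    /* threads done with cur_state */

static void ack_state(void)
{
        /* The last thread to ack advances everyone to the next state. */
        if (atomic_fetch_add(&acked, 1) == NR_CPUS_SIM - 1) {
                atomic_store(&acked, 0);
                atomic_fetch_add(&cur_state, 1);
        }
}

static void *rendezvous(void *arg)
{
        long cpu = (long)arg;
        int seen = SM_NONE;

        do {
                while (atomic_load(&cur_state) == seen)
                        sched_yield();          /* cpu_relax() analogue */
                seen = atomic_load(&cur_state);
                switch (seen) {
                case SM_DISABLE_IRQ:
                        /* kernel: local_irq_save() + hard_irq_disable() */
                        break;
                case SM_RUN:
                        printf("cpu %ld: applying MTRR update\n", cpu);
                        break;
                default:
                        break;
                }
                ack_state();
        } while (seen != SM_EXIT);
        return NULL;
}

int main(void)
{
        pthread_t t[NR_CPUS_SIM];

        for (long i = 0; i < NR_CPUS_SIM; i++)
                pthread_create(&t[i], NULL, rendezvous, (void *)i);
        for (long i = 0; i < NR_CPUS_SIM; i++)
                pthread_join(t[i], NULL);
        return 0;
}

Because every participant steps through SM_DISABLE_IRQ and SM_RUN under one
mechanism, there is no second, independently ordered rendezvous for a cpu to
get stuck waiting on, which is the deadlock the patch description calls out.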