author | Russell King <rmk+kernel@arm.linux.org.uk> | 2009-12-12 14:47:40 +0000 |
---|---|---|
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2009-12-13 16:33:19 +0000 |
commit | 0d782dc430d94dc36b47cb11c2e33ecb1bb38234 (patch) | |
tree | d9718e6c1aefdf943c6ad16ceaae25f692f40d3f /arch/arm/vfp | |
parent | 002939729c7c8f6448e89e9e86c8c5bf6f0c77d3 (diff) | |
download | kernel_samsung_smdk4412-0d782dc430d94dc36b47cb11c2e33ecb1bb38234.tar.gz kernel_samsung_smdk4412-0d782dc430d94dc36b47cb11c2e33ecb1bb38234.tar.bz2 kernel_samsung_smdk4412-0d782dc430d94dc36b47cb11c2e33ecb1bb38234.zip |
ARM: VFP: fix vfp thread init bug and document vfp notifier entry conditions
When the VFP notifier is called for flush_thread(), we may be
preemptible, meaning we might migrate to another CPU, which means
referencing the current CPU number without some form of locking is
invalid, and can cause data corruption.
In most cases this isn't a problem, since atomic notifiers are run
under the RCU read lock, which for most configurations results in
preemption being disabled - except when the preemptible tree-based
RCU implementation is selected.
Let's make it safe anyway.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
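The race described above is easiest to see side by side. The following is an illustrative sketch, not part of the patch: the function names racy_flush() and safe_flush() are hypothetical, and the bodies abbreviate the old notifier code and the new vfp_thread_flush() from the diff below.

```c
/* Hypothetical sketch contrasting the racy pattern with the fixed one. */

static void racy_flush(struct thread_info *thread)
{
        /*
         * thread->cpu is read while we may still be preempted: if the
         * task migrates after this load, 'cpu' names the wrong CPU and
         * we touch another CPU's last_VFP_context slot.
         */
        unsigned int cpu = thread->cpu;

        if (last_VFP_context[cpu] == &thread->vfpstate)
                last_VFP_context[cpu] = NULL;
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}

static void safe_flush(struct thread_info *thread)
{
        /*
         * get_cpu() disables preemption and returns the CPU we are now
         * pinned to, so the last_VFP_context[] update and the hardware
         * disable below are guaranteed to apply to the same CPU.
         */
        unsigned int cpu = get_cpu();

        if (last_VFP_context[cpu] == &thread->vfpstate)
                last_VFP_context[cpu] = NULL;
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
        put_cpu();              /* re-enable preemption */
}
```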
Diffstat (limited to 'arch/arm/vfp')
-rw-r--r-- | arch/arm/vfp/vfpmodule.c | 83 |
1 file changed, 62 insertions, 21 deletions
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 2d7423af119..aed05bc3c2e 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -38,16 +38,72 @@ union vfp_state *last_VFP_context[NR_CPUS];
  */
 unsigned int VFP_arch;
 
+/*
+ * Per-thread VFP initialization.
+ */
+static void vfp_thread_flush(struct thread_info *thread)
+{
+        union vfp_state *vfp = &thread->vfpstate;
+        unsigned int cpu;
+
+        memset(vfp, 0, sizeof(union vfp_state));
+
+        vfp->hard.fpexc = FPEXC_EN;
+        vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+
+        /*
+         * Disable VFP to ensure we initialize it first. We must ensure
+         * that the modification of last_VFP_context[] and hardware disable
+         * are done for the same CPU and without preemption.
+         */
+        cpu = get_cpu();
+        if (last_VFP_context[cpu] == vfp)
+                last_VFP_context[cpu] = NULL;
+        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+        put_cpu();
+}
+
+static void vfp_thread_release(struct thread_info *thread)
+{
+        /* release case: Per-thread VFP cleanup. */
+        union vfp_state *vfp = &thread->vfpstate;
+        unsigned int cpu = thread->cpu;
+
+        if (last_VFP_context[cpu] == vfp)
+                last_VFP_context[cpu] = NULL;
+}
+
+/*
+ * When this function is called with the following 'cmd's, the following
+ * is true while this function is being run:
+ *  THREAD_NOFTIFY_SWTICH:
+ *   - the previously running thread will not be scheduled onto another CPU.
+ *   - the next thread to be run (v) will not be running on another CPU.
+ *   - thread->cpu is the local CPU number
+ *   - not preemptible as we're called in the middle of a thread switch
+ *  THREAD_NOTIFY_FLUSH:
+ *   - the thread (v) will be running on the local CPU, so
+ *     v === current_thread_info()
+ *   - thread->cpu is the local CPU number at the time it is accessed,
+ *     but may change at any time.
+ *   - we could be preempted if tree preempt rcu is enabled, so
+ *     it is unsafe to use thread->cpu.
+ *  THREAD_NOTIFY_RELEASE:
+ *   - the thread (v) will not be running on any CPU; it is a dead thread.
+ *   - thread->cpu will be the last CPU the thread ran on, which may not
+ *     be the current CPU.
+ *   - we could be preempted if tree preempt rcu is enabled.
+ */
 static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
         struct thread_info *thread = v;
-        union vfp_state *vfp;
-        __u32 cpu = thread->cpu;
 
         if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
                 u32 fpexc = fmrx(FPEXC);
 
 #ifdef CONFIG_SMP
+                unsigned int cpu = thread->cpu;
+
                 /*
                  * On SMP, if VFP is enabled, save the old state in
                  * case the thread migrates to a different CPU. The
@@ -74,25 +130,10 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
                 return NOTIFY_DONE;
         }
 
-        vfp = &thread->vfpstate;
-        if (cmd == THREAD_NOTIFY_FLUSH) {
-                /*
-                 * Per-thread VFP initialisation.
-                 */
-                memset(vfp, 0, sizeof(union vfp_state));
-
-                vfp->hard.fpexc = FPEXC_EN;
-                vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-
-                /*
-                 * Disable VFP to ensure we initialise it first.
-                 */
-                fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-        }
-
-        /* flush and release case: Per-thread VFP cleanup. */
-        if (last_VFP_context[cpu] == vfp)
-                last_VFP_context[cpu] = NULL;
+        if (cmd == THREAD_NOTIFY_FLUSH)
+                vfp_thread_flush(thread);
+        else
+                vfp_thread_release(thread);
 
         return NOTIFY_DONE;
 }
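For context on how vfp_notifier() is invoked at all: it is hooked into the ARM thread-notify chain from vfp_init() in the same file, which this hunk does not touch. A minimal sketch of that registration, based on the surrounding vfpmodule.c code (details may differ), looks roughly like this:

```c
/* Sketch of the registration path (outside this hunk). */
static struct notifier_block vfp_notifier_block = {
        .notifier_call  = vfp_notifier,
};

static int __init vfp_init(void)
{
        /* ... probe the VFP hardware, set VFP_arch, install vfp_vector ... */

        /*
         * Register for THREAD_NOTIFY_SWITCH / FLUSH / RELEASE events so the
         * lazy VFP context handling above runs on every thread switch,
         * flush_thread() and thread exit.
         */
        thread_register_notifier(&vfp_notifier_block);
        return 0;
}
late_initcall(vfp_init);
```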