author	Paul Mackerras <paulus@samba.org>	2006-06-25 05:47:14 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-25 10:01:01 -0700
commit	bfe5d834195b3089b8846577311340376cc0f450 (patch)
tree	52470de0fe87ff8372700e3472735cd5c14cee9d /kernel
parent	6ceab8a936c302c0cea2bfe55617c76e2f5746fa (diff)
[PATCH] Define __raw_get_cpu_var and use it
There are several instances of per_cpu(foo, raw_smp_processor_id()), which
is semantically equivalent to __get_cpu_var(foo) but without the warning
that smp_processor_id() can give if CONFIG_DEBUG_PREEMPT is enabled.  For
those architectures with optimized per-cpu implementations, namely ia64,
powerpc, s390, sparc64 and x86_64, per_cpu() turns into more and slower
code than __get_cpu_var(), so it would be preferable to use __get_cpu_var
on those platforms.

This defines a __raw_get_cpu_var(x) macro which turns into
per_cpu(x, raw_smp_processor_id()) on architectures that use the generic
per-cpu implementation, and turns into __get_cpu_var(x) on the
architectures that have an optimized per-cpu implementation.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
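For reference, a minimal sketch of what the new macro expands to, following
the description above.  The real definitions live in each architecture's
percpu header, which is outside the kernel/ diffstat shown here, so the
exact spelling may differ:

	/* Generic per-cpu implementation (sketch): index the per-cpu
	 * data by CPU number, using the non-warning
	 * raw_smp_processor_id(). */
	#define __raw_get_cpu_var(var)	per_cpu(var, raw_smp_processor_id())

	/* Optimized implementations (ia64, powerpc, s390, sparc64,
	 * x86_64; sketch): alias the fast local-CPU access directly. */
	#define __raw_get_cpu_var(var)	__get_cpu_var(var)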
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/hrtimer.c	4
-rw-r--r--	kernel/sched.c	4
-rw-r--r--	kernel/softlockup.c	2
-rw-r--r--	kernel/timer.c	2
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 18324305724..9587aac72f4 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -576,7 +576,7 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 
 	memset(timer, 0, sizeof(struct hrtimer));
 
-	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
+	bases = __raw_get_cpu_var(hrtimer_bases);
 
 	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_ABS)
 		clock_id = CLOCK_MONOTONIC;
@@ -599,7 +599,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 {
 	struct hrtimer_base *bases;
 
-	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
+	bases = __raw_get_cpu_var(hrtimer_bases);
 	*tp = ktime_to_timespec(bases[which_clock].resolution);
 
 	return 0;
diff --git a/kernel/sched.c b/kernel/sched.c
index 5dbc4269447..f8d540b324c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4152,7 +4152,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+	struct runqueue *rq = &__raw_get_cpu_var(runqueues);
 
 	atomic_inc(&rq->nr_iowait);
 	schedule();
@@ -4163,7 +4163,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+	struct runqueue *rq = &__raw_get_cpu_var(runqueues);
 	long ret;
 
 	atomic_inc(&rq->nr_iowait);
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 14c7faf0290..2c1be1163ed 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -36,7 +36,7 @@ static struct notifier_block panic_block = {
 
 void touch_softlockup_watchdog(void)
 {
-	per_cpu(touch_timestamp, raw_smp_processor_id()) = jiffies;
+	__raw_get_cpu_var(touch_timestamp) = jiffies;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
diff --git a/kernel/timer.c b/kernel/timer.c
index f35b3939e93..eb97371b87d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -146,7 +146,7 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
 void fastcall init_timer(struct timer_list *timer)
 {
 	timer->entry.next = NULL;
-	timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
+	timer->base = __raw_get_cpu_var(tvec_bases);
 }
 EXPORT_SYMBOL(init_timer);
 