Subject: cpu/hotplug: Implement CPU pinning
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 19 Jul 2017 17:31:20 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.1-rt3.tar.xz
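
Provide a per-CPU rwlock which allows tasks to pin themselves to the
current CPU and thereby prevent that CPU from being taken down.

pin_current_cpu() takes the read side of the current CPU's lock. The
fast path is a trylock. If that fails, a hotplug operation is in
progress: the task reenables preemption, blocks on the read side and,
because it may have been migrated while sleeping, rechecks the CPU
once the lock is acquired and retries on the new CPU if it changed.
unpin_current_cpu() drops the read side; task_struct::pinned_on_cpu
is used to find the lock which was actually taken should the task
have been migrated while pinned, which must not happen (hence the
WARN_ON()). takedown_cpu() acquires the write side before
stop_machine() brings the CPU down, i.e. it waits until all tasks
pinned on that CPU have left their pinned sections, and releases it
again once the CPU is dead or the takedown failed.
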
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/sched.h |    1 +
 kernel/cpu.c          |   38 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+)

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -671,6 +671,7 @@ struct task_struct {
#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
int migrate_disable;
int migrate_disable_update;
+ int pinned_on_cpu;
# ifdef CONFIG_SCHED_DEBUG
int migrate_disable_atomic;
# endif
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -74,6 +74,11 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_s
.fail = CPUHP_INVALID,
};

+#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \
+ __RWLOCK_RT_INITIALIZER(cpuhp_pin_lock);
+#endif
+
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
@@ -285,7 +290,28 @@ static int cpu_hotplug_disabled;
*/
void pin_current_cpu(void)
{
+ struct rt_rw_lock *cpuhp_pin;
+ unsigned int cpu;
+ int ret;
+
+again:
+ cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
+ ret = __read_rt_trylock(cpuhp_pin);
+ if (ret) {
+ current->pinned_on_cpu = smp_processor_id();
+ return;
+ }
+ cpu = smp_processor_id();
+ preempt_enable();
+
+ __read_rt_lock(cpuhp_pin);
+ preempt_disable();
+ if (cpu != smp_processor_id()) {
+ __read_rt_unlock(cpuhp_pin);
+ goto again;
+ }
+ current->pinned_on_cpu = cpu;
}

/**
@@ -293,6 +319,13 @@ void pin_current_cpu(void)
*/
void unpin_current_cpu(void)
{
+ struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
+
+ if (WARN_ON(current->pinned_on_cpu != smp_processor_id()))
+ cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, current->pinned_on_cpu);
+
+ current->pinned_on_cpu = -1;
+ __read_rt_unlock(cpuhp_pin);
}

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
@@ -846,6 +879,7 @@ static int take_cpu_down(void *_param)

static int takedown_cpu(unsigned int cpu)
{
+ struct rt_rw_lock *cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, cpu);
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;

@@ -858,11 +892,14 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();

+ __write_rt_lock(cpuhp_pin);
+
/*
* So now all preempt/rcu users must observe !cpu_active().
*/
err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
if (err) {
+ __write_rt_unlock(cpuhp_pin);
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
@@ -881,6 +918,7 @@ static int takedown_cpu(unsigned int cpu
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

+ __write_rt_unlock(cpuhp_pin);
/* Interrupts are moved away from the dying cpu, reenable alloc/free */
irq_unlock_sparse();
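
[ Note: the following is an illustrative sketch, not part of the
  patch. It shows how a caller is expected to pair pin_current_cpu()
  with unpin_current_cpu(), loosely modeled on the RT tree's
  migrate_disable()/migrate_enable(). Nesting details, atomic-context
  handling and the cpumask update of the real functions are omitted. ]

void migrate_disable(void)
{
	struct task_struct *p = current;

	if (p->migrate_disable) {
		/* Nested call: the task is already pinned. */
		p->migrate_disable++;
		return;
	}

	preempt_disable();
	/* Takes the read side of this CPU's cpuhp_pin_lock; may block. */
	pin_current_cpu();
	p->migrate_disable = 1;
	preempt_enable();
}

void migrate_enable(void)
{
	struct task_struct *p = current;

	if (p->migrate_disable > 1) {
		p->migrate_disable--;
		return;
	}

	preempt_disable();
	p->migrate_disable = 0;
	/* Drops the read side; a pending takedown_cpu() can proceed. */
	unpin_current_cpu();
	preempt_enable();
}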