summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorYabin Cui <yabinc@google.com>2020-02-27 17:11:58 -0800
committerYabin Cui <yabinc@google.com>2020-02-28 10:14:21 -0800
commitf18803427541f34082fd659a4dd0deb49a8a4b89 (patch)
tree2da7610edef803d71f05bc3d0301516885a40120
parent0941edfb7ed31cea4a27334377b87746352edec8 (diff)
downloadplatform_system_bpfprogs-f18803427541f34082fd659a4dd0deb49a8a4b89.tar.gz
platform_system_bpfprogs-f18803427541f34082fd659a4dd0deb49a8a4b89.tar.bz2
platform_system_bpfprogs-f18803427541f34082fd659a4dd0deb49a8a4b89.zip
time_in_state: don't block sched_switch events.
Returning zero in the sched_switch bpf program blocks userspace from receiving sched_switch events through the perf_event_fd interface.

Bug: 149797433
Test: build and boot.
Change-Id: Icea311d6fe05d2698a624d17d47141fc735681de
-rw-r--r--time_in_state.c15
1 files changed, 8 insertions, 7 deletions
diff --git a/time_in_state.c b/time_in_state.c
index cb56a93..5befc6d 100644
--- a/time_in_state.c
+++ b/time_in_state.c
@@ -45,23 +45,24 @@ struct switch_args {
DEFINE_BPF_PROG("tracepoint/sched/sched_switch", AID_ROOT, AID_SYSTEM, tp_sched_switch)
(struct switch_args* args) {
+ const int ALLOW = 1; // return 1 to avoid blocking simpleperf from receiving events.
uint32_t zero = 0;
uint64_t* last = bpf_cpu_last_update_map_lookup_elem(&zero);
- if (!last) return 0;
+ if (!last) return ALLOW;
uint64_t old_last = *last;
uint64_t time = bpf_ktime_get_ns();
*last = time;
uint32_t* active = bpf_nr_active_map_lookup_elem(&zero);
- if (!active) return 0;
+ if (!active) return ALLOW;
uint32_t cpu = bpf_get_smp_processor_id();
uint32_t* policyp = bpf_cpu_policy_map_lookup_elem(&cpu);
- if (!policyp) return 0;
+ if (!policyp) return ALLOW;
uint32_t policy = *policyp;
uint32_t* policy_active = bpf_policy_nr_active_map_lookup_elem(&policy);
- if (!policy_active) return 0;
+ if (!policy_active) return ALLOW;
uint32_t nactive = *active - 1;
uint32_t policy_nactive = *policy_active - 1;
@@ -77,7 +78,7 @@ DEFINE_BPF_PROG("tracepoint/sched/sched_switch", AID_ROOT, AID_SYSTEM, tp_sched_
// 2) old_last == 0, so this is the first time we've seen this CPU. Any delta will be invalid,
// and our active CPU counts don't include this CPU yet so we shouldn't decrement them even
// if we're going idle.
- if (!args->prev_pid || !old_last) return 0;
+ if (!args->prev_pid || !old_last) return ALLOW;
if (!args->next_pid) {
__sync_fetch_and_add(active, -1);
@@ -85,7 +86,7 @@ DEFINE_BPF_PROG("tracepoint/sched/sched_switch", AID_ROOT, AID_SYSTEM, tp_sched_
}
uint8_t* freq_idxp = bpf_policy_freq_idx_map_lookup_elem(&policy);
- if (!freq_idxp || !*freq_idxp) return 0;
+ if (!freq_idxp || !*freq_idxp) return ALLOW;
// freq_to_idx_map uses 1 as its minimum index so that *freq_idxp == 0 only when uninitialized
uint8_t freq_idx = *freq_idxp - 1;
@@ -125,7 +126,7 @@ DEFINE_BPF_PROG("tracepoint/sched/sched_switch", AID_ROOT, AID_SYSTEM, tp_sched_
} else {
bpf_uid_last_update_map_update_elem(&uid, &time, BPF_NOEXIST);
}
- return 0;
+ return ALLOW;
}
struct cpufreq_args {