author     Wolfgang Wiedmeyer <wolfgit@wiedmeyer.de>  2017-09-08 19:23:04 +0200
committer  Wolfgang Wiedmeyer <wolfgit@wiedmeyer.de>  2017-09-08 19:23:04 +0200
commit     2dc3d0d4086ea7e333d7cb65d940c3f483dbf47c (patch)
tree       cdfd3a49a51c06c0bf25c7942f42adb47bd99c89 /kernel
parent     e3e4496f794cfd3eb379ce67a748cb019fbc2840 (diff)
parent     423532f98b4d3e57e5b59d5140e9c06e83f560a6 (diff)
download   kernel_samsung_tuna-replicant-6.0-0002.tar.gz
           kernel_samsung_tuna-replicant-6.0-0002.tar.bz2
           kernel_samsung_tuna-replicant-6.0-0002.zip
Merge remote-tracking branch 'lineageos/cm-13.0' into replicant-6.0 (replicant-6.0-0003, replicant-6.0-0002)
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditsc.c             333
-rw-r--r--  kernel/events/core.c         241
-rw-r--r--  kernel/exit.c                  5
-rw-r--r--  kernel/fork.c                  2
-rw-r--r--  kernel/sched.c                32
-rw-r--r--  kernel/trace/ring_buffer.c    10
6 files changed, 409 insertions(+), 214 deletions(-)
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 00d79df03e7..c2f43dbd3cf 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -67,6 +67,7 @@
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/fs_struct.h>
+#include <linux/uaccess.h>
#include "audit.h"
@@ -77,7 +78,8 @@
/* Indicates that audit should log the full pathname. */
#define AUDIT_NAME_FULL -1
-/* no execve audit message should be longer than this (userspace limits) */
+/* no execve audit message should be longer than this (userspace limits),
+ * see the note near the top of audit_log_execve_info() about this value */
#define MAX_EXECVE_AUDIT_LEN 7500
/* number of audit rules */
@@ -1015,189 +1017,184 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
return rc;
}
-/*
- * to_send and len_sent accounting are very loose estimates. We aren't
- * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being
- * within about 500 bytes (next page boundary)
- *
- * why snprintf? an int is up to 12 digits long. if we just assumed when
- * logging that a[%d]= was going to be 16 characters long we would be wasting
- * space in every audit message. In one 7500 byte message we can log up to
- * about 1000 min size arguments. That comes down to about 50% waste of space
- * if we didn't do the snprintf to find out how long arg_num_len was.
- */
-static int audit_log_single_execve_arg(struct audit_context *context,
- struct audit_buffer **ab,
- int arg_num,
- size_t *len_sent,
- const char __user *p,
- char *buf)
-{
- char arg_num_len_buf[12];
- const char __user *tmp_p = p;
- /* how many digits are in arg_num? 5 is the length of ' a=""' */
- size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 5;
- size_t len, len_left, to_send;
- size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN;
- unsigned int i, has_cntl = 0, too_long = 0;
- int ret;
-
- /* strnlen_user includes the null we don't want to send */
- len_left = len = strnlen_user(p, MAX_ARG_STRLEN) - 1;
-
- /*
- * We just created this mm, if we can't find the strings
- * we just copied into it something is _very_ wrong. Similar
- * for strings that are too long, we should not have created
- * any.
- */
- if (unlikely((len == -1) || len > MAX_ARG_STRLEN - 1)) {
- WARN_ON(1);
- send_sig(SIGKILL, current, 0);
- return -1;
- }
-
- /* walk the whole argument looking for non-ascii chars */
- do {
- if (len_left > MAX_EXECVE_AUDIT_LEN)
- to_send = MAX_EXECVE_AUDIT_LEN;
- else
- to_send = len_left;
- ret = copy_from_user(buf, tmp_p, to_send);
- /*
- * There is no reason for this copy to be short. We just
- * copied them here, and the mm hasn't been exposed to user-
- * space yet.
- */
- if (ret) {
- WARN_ON(1);
- send_sig(SIGKILL, current, 0);
- return -1;
- }
- buf[to_send] = '\0';
- has_cntl = audit_string_contains_control(buf, to_send);
- if (has_cntl) {
- /*
- * hex messages get logged as 2 bytes, so we can only
- * send half as much in each message
- */
- max_execve_audit_len = MAX_EXECVE_AUDIT_LEN / 2;
- break;
- }
- len_left -= to_send;
- tmp_p += to_send;
- } while (len_left > 0);
-
- len_left = len;
-
- if (len > max_execve_audit_len)
- too_long = 1;
-
- /* rewalk the argument actually logging the message */
- for (i = 0; len_left > 0; i++) {
- int room_left;
-
- if (len_left > max_execve_audit_len)
- to_send = max_execve_audit_len;
- else
- to_send = len_left;
-
- /* do we have space left to send this argument in this ab? */
- room_left = MAX_EXECVE_AUDIT_LEN - arg_num_len - *len_sent;
- if (has_cntl)
- room_left -= (to_send * 2);
- else
- room_left -= to_send;
- if (room_left < 0) {
- *len_sent = 0;
- audit_log_end(*ab);
- *ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE);
- if (!*ab)
- return 0;
- }
-
- /*
- * first record needs to say how long the original string was
- * so we can be sure nothing was lost.
- */
- if ((i == 0) && (too_long))
- audit_log_format(*ab, " a%d_len=%zu", arg_num,
- has_cntl ? 2*len : len);
-
- /*
- * normally arguments are small enough to fit and we already
- * filled buf above when we checked for control characters
- * so don't bother with another copy_from_user
- */
- if (len >= max_execve_audit_len)
- ret = copy_from_user(buf, p, to_send);
- else
- ret = 0;
- if (ret) {
- WARN_ON(1);
- send_sig(SIGKILL, current, 0);
- return -1;
- }
- buf[to_send] = '\0';
-
- /* actually log it */
- audit_log_format(*ab, " a%d", arg_num);
- if (too_long)
- audit_log_format(*ab, "[%d]", i);
- audit_log_format(*ab, "=");
- if (has_cntl)
- audit_log_n_hex(*ab, buf, to_send);
- else
- audit_log_string(*ab, buf);
-
- p += to_send;
- len_left -= to_send;
- *len_sent += arg_num_len;
- if (has_cntl)
- *len_sent += to_send * 2;
- else
- *len_sent += to_send;
- }
- /* include the null we didn't log */
- return len + 1;
-}
-
static void audit_log_execve_info(struct audit_context *context,
struct audit_buffer **ab,
struct audit_aux_data_execve *axi)
{
- int i;
- size_t len, len_sent = 0;
- const char __user *p;
+ long len_max;
+ long len_rem;
+ long len_full;
+ long len_buf;
+ long len_abuf;
+ long len_tmp;
+ bool require_data;
+ bool encode;
+ unsigned int iter;
+ unsigned int arg;
+ char *buf_head;
char *buf;
+ const char __user *p;
+
+ /* NOTE: this buffer needs to be large enough to hold all the non-arg
+ * data we put in the audit record for this argument (see the
+ * code below) ... at this point in time 96 is plenty */
+ char abuf[96];
if (axi->mm != current->mm)
return; /* execve failed, no additional info */
p = (const char __user *)axi->mm->arg_start;
- audit_log_format(*ab, "argc=%d", axi->argc);
+ /* NOTE: we set MAX_EXECVE_AUDIT_LEN to a rather arbitrary limit, the
+ * current value of 7500 is not as important as the fact that it
+ * is less than 8k, a setting of 7500 gives us plenty of wiggle
+ * room if we go over a little bit in the logging below */
+ WARN_ON_ONCE(MAX_EXECVE_AUDIT_LEN > 7500);
+ len_max = MAX_EXECVE_AUDIT_LEN;
- /*
- * we need some kernel buffer to hold the userspace args. Just
- * allocate one big one rather than allocating one of the right size
- * for every single argument inside audit_log_single_execve_arg()
- * should be <8k allocation so should be pretty safe.
- */
- buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
- if (!buf) {
+ /* scratch buffer to hold the userspace args */
+ buf_head = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
+ if (!buf_head) {
audit_panic("out of memory for argv string\n");
return;
}
+ buf = buf_head;
- for (i = 0; i < axi->argc; i++) {
- len = audit_log_single_execve_arg(context, ab, i,
- &len_sent, p, buf);
- if (len <= 0)
- break;
- p += len;
- }
- kfree(buf);
+ audit_log_format(*ab, "argc=%d", axi->argc);
+
+ len_rem = len_max;
+ len_buf = 0;
+ len_full = 0;
+ require_data = true;
+ encode = false;
+ iter = 0;
+ arg = 0;
+ do {
+ /* NOTE: we don't ever want to trust this value for anything
+ * serious, but the audit record format insists we
+ * provide an argument length for really long arguments,
+ * e.g. > MAX_EXECVE_AUDIT_LEN, so we have no choice but
+ * to use strncpy_from_user() to obtain this value for
+ * recording in the log, although we don't use it
+ * anywhere here to avoid a double-fetch problem */
+ if (len_full == 0)
+ len_full = strnlen_user(p, MAX_ARG_STRLEN) - 1;
+
+ /* read more data from userspace */
+ if (require_data) {
+ /* can we make more room in the buffer? */
+ if (buf != buf_head) {
+ memmove(buf_head, buf, len_buf);
+ buf = buf_head;
+ }
+
+ /* fetch as much as we can of the argument */
+ len_tmp = strncpy_from_user(&buf_head[len_buf], p,
+ len_max - len_buf);
+ if (len_tmp == -EFAULT) {
+ /* unable to copy from userspace */
+ send_sig(SIGKILL, current, 0);
+ goto out;
+ } else if (len_tmp == (len_max - len_buf)) {
+ /* buffer is not large enough */
+ require_data = true;
+ /* NOTE: if we are going to span multiple
+ * buffers force the encoding so we stand
+ * a chance at a sane len_full value and
+ * consistent record encoding */
+ encode = true;
+ len_full = len_full * 2;
+ p += len_tmp;
+ } else {
+ require_data = false;
+ if (!encode)
+ encode = audit_string_contains_control(
+ buf, len_tmp);
+ /* try to use a trusted value for len_full */
+ if (len_full < len_max)
+ len_full = (encode ?
+ len_tmp * 2 : len_tmp);
+ p += len_tmp + 1;
+ }
+ len_buf += len_tmp;
+ buf_head[len_buf] = '\0';
+
+ /* length of the buffer in the audit record? */
+ len_abuf = (encode ? len_buf * 2 : len_buf + 2);
+ }
+
+ /* write as much as we can to the audit log */
+ if (len_buf > 0) {
+ /* NOTE: some magic numbers here - basically if we
+ * can't fit a reasonable amount of data into the
+ * existing audit buffer, flush it and start with
+ * a new buffer */
+ if ((sizeof(abuf) + 8) > len_rem) {
+ len_rem = len_max;
+ audit_log_end(*ab);
+ *ab = audit_log_start(context,
+ GFP_KERNEL, AUDIT_EXECVE);
+ if (!*ab)
+ goto out;
+ }
+
+ /* create the non-arg portion of the arg record */
+ len_tmp = 0;
+ if (require_data || (iter > 0) ||
+ ((len_abuf + sizeof(abuf)) > len_rem)) {
+ if (iter == 0) {
+ len_tmp += snprintf(&abuf[len_tmp],
+ sizeof(abuf) - len_tmp,
+ " a%d_len=%lu",
+ arg, len_full);
+ }
+ len_tmp += snprintf(&abuf[len_tmp],
+ sizeof(abuf) - len_tmp,
+ " a%d[%d]=", arg, iter++);
+ } else
+ len_tmp += snprintf(&abuf[len_tmp],
+ sizeof(abuf) - len_tmp,
+ " a%d=", arg);
+ WARN_ON(len_tmp >= sizeof(abuf));
+ abuf[sizeof(abuf) - 1] = '\0';
+
+ /* log the arg in the audit record */
+ audit_log_format(*ab, "%s", abuf);
+ len_rem -= len_tmp;
+ len_tmp = len_buf;
+ if (encode) {
+ if (len_abuf > len_rem)
+ len_tmp = len_rem / 2; /* encoding */
+ audit_log_n_hex(*ab, buf, len_tmp);
+ len_rem -= len_tmp * 2;
+ len_abuf -= len_tmp * 2;
+ } else {
+ if (len_abuf > len_rem)
+ len_tmp = len_rem - 2; /* quotes */
+ audit_log_n_string(*ab, buf, len_tmp);
+ len_rem -= len_tmp + 2;
+ /* don't subtract the "2" because we still need
+ * to add quotes to the remaining string */
+ len_abuf -= len_tmp;
+ }
+ len_buf -= len_tmp;
+ buf += len_tmp;
+ }
+
+ /* ready to move to the next argument? */
+ if ((len_buf == 0) && !require_data) {
+ arg++;
+ iter = 0;
+ len_full = 0;
+ require_data = true;
+ encode = false;
+ }
+ } while (arg < axi->argc);
+
+ /* NOTE: the caller handles the final audit_log_end() call */
+
+out:
+ kfree(buf_head);
}
static void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
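
The reworked audit_log_execve_info() above streams each argument through a fixed scratch buffer and, once an argument exceeds MAX_EXECVE_AUDIT_LEN, records its full length once with an "a%d_len=" field and emits the value in numbered "a%d[%d]=" pieces. A minimal userspace sketch of that splitting rule (the helper name and the printf output are illustrative only, not part of the patch):

/* sketch: split an over-long execve argument across numbered record keys */
#include <stdio.h>
#include <string.h>

#define MAX_EXECVE_AUDIT_LEN 7500

static void log_execve_arg(int arg_num, const char *arg)
{
	size_t len = strlen(arg);
	size_t off = 0;
	int iter = 0;

	if (len <= MAX_EXECVE_AUDIT_LEN) {
		/* short argument: a single " a%d=" key is enough */
		printf(" a%d=%s", arg_num, arg);
		return;
	}

	/* over-long argument: record the full length once ... */
	printf(" a%d_len=%zu", arg_num, len);
	/* ... then emit the value in MAX_EXECVE_AUDIT_LEN-sized pieces */
	while (off < len) {
		size_t chunk = len - off;

		if (chunk > MAX_EXECVE_AUDIT_LEN)
			chunk = MAX_EXECVE_AUDIT_LEN;
		printf(" a%d[%d]=%.*s", arg_num, iter++, (int)chunk, arg + off);
		off += chunk;
	}
}

int main(void)
{
	log_execve_arg(0, "/system/bin/sh");	/* fits in one " a0=" key */
	printf("\n");
	return 0;
}
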
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4d6c8e6ff4c..ad56e8c2835 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -609,6 +609,77 @@ static void put_ctx(struct perf_event_context *ctx)
}
}
+/*
+ * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
+ * perf_pmu_migrate_context() we need some magic.
+ *
+ * Those places that change perf_event::ctx will hold both
+ * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
+ *
+ * Lock ordering is by mutex address. There is one other site where
+ * perf_event_context::mutex nests and that is put_event(). But remember that
+ * that is a parent<->child context relation, and migration does not affect
+ * children, therefore these two orderings should not interact.
+ *
+ * The change in perf_event::ctx does not affect children (as claimed above)
+ * because the sys_perf_event_open() case will install a new event and break
+ * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
+ * concerned with cpuctx and that doesn't have children.
+ *
+ * The places that change perf_event::ctx will issue:
+ *
+ * perf_remove_from_context();
+ * synchronize_rcu();
+ * perf_install_in_context();
+ *
+ * to affect the change. The remove_from_context() + synchronize_rcu() should
+ * quiesce the event, after which we can install it in the new location. This
+ * means that only external vectors (perf_fops, prctl) can perturb the event
+ * while in transit. Therefore all such accessors should also acquire
+ * perf_event_context::mutex to serialize against this.
+ *
+ * However; because event->ctx can change while we're waiting to acquire
+ * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
+ * function.
+ *
+ * Lock order:
+ * task_struct::perf_event_mutex
+ * perf_event_context::mutex
+ * perf_event_context::lock
+ * perf_event::child_mutex;
+ * perf_event::mmap_mutex
+ * mmap_sem
+ */
+static struct perf_event_context *perf_event_ctx_lock(struct perf_event *event)
+{
+ struct perf_event_context *ctx;
+
+again:
+ rcu_read_lock();
+ ctx = ACCESS_ONCE(event->ctx);
+ if (!atomic_inc_not_zero(&ctx->refcount)) {
+ rcu_read_unlock();
+ goto again;
+ }
+ rcu_read_unlock();
+
+ mutex_lock(&ctx->mutex);
+ if (event->ctx != ctx) {
+ mutex_unlock(&ctx->mutex);
+ put_ctx(ctx);
+ goto again;
+ }
+
+ return ctx;
+}
+
+static void perf_event_ctx_unlock(struct perf_event *event,
+ struct perf_event_context *ctx)
+{
+ mutex_unlock(&ctx->mutex);
+ put_ctx(ctx);
+}
+
static void unclone_ctx(struct perf_event_context *ctx)
{
if (ctx->parent_ctx) {
@@ -1250,7 +1321,7 @@ static int __perf_event_disable(void *info)
* is the current context on this CPU and preemption is disabled,
* hence we can't get into perf_event_task_sched_out for this context.
*/
-void perf_event_disable(struct perf_event *event)
+static void _perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@@ -1292,6 +1363,19 @@ retry:
raw_spin_unlock_irq(&ctx->lock);
}
+/*
+ * Strictly speaking kernel users cannot create groups and therefore this
+ * interface does not need the perf_event_ctx_lock() magic.
+ */
+void perf_event_disable(struct perf_event *event)
+{
+ struct perf_event_context *ctx;
+
+ ctx = perf_event_ctx_lock(event);
+ _perf_event_disable(event);
+ perf_event_ctx_unlock(event, ctx);
+}
+
static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx,
u64 tstamp)
@@ -1725,7 +1809,7 @@ unlock:
* perf_event_for_each_child or perf_event_for_each as described
* for perf_event_disable.
*/
-void perf_event_enable(struct perf_event *event)
+static void _perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@@ -1782,7 +1866,19 @@ out:
raw_spin_unlock_irq(&ctx->lock);
}
-static int perf_event_refresh(struct perf_event *event, int refresh)
+/*
+ * See perf_event_disable();
+ */
+void perf_event_enable(struct perf_event *event)
+{
+ struct perf_event_context *ctx;
+
+ ctx = perf_event_ctx_lock(event);
+ _perf_event_enable(event);
+ perf_event_ctx_unlock(event, ctx);
+}
+
+static int _perf_event_refresh(struct perf_event *event, int refresh)
{
/*
* not supported on inherited events
@@ -1791,11 +1887,26 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
return -EINVAL;
atomic_add(refresh, &event->event_limit);
- perf_event_enable(event);
+ _perf_event_enable(event);
return 0;
}
+/*
+ * See perf_event_disable()
+ */
+int perf_event_refresh(struct perf_event *event, int refresh)
+{
+ struct perf_event_context *ctx;
+ int ret;
+
+ ctx = perf_event_ctx_lock(event);
+ ret = _perf_event_refresh(event, refresh);
+ perf_event_ctx_unlock(event, ctx);
+
+ return ret;
+}
+
static void ctx_sched_out(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type)
@@ -3039,7 +3150,16 @@ static void put_event(struct perf_event *event)
rcu_read_unlock();
if (owner) {
- mutex_lock(&owner->perf_event_mutex);
+ /*
+ * If we're here through perf_event_exit_task() we're already
+ * holding ctx->mutex which would be an inversion wrt. the
+ * normal lock order.
+ *
+ * However we can safely take this lock because its the child
+ * ctx->mutex.
+ */
+ mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
+
/*
* We have to re-check the event->owner field, if it is cleared
* we raced with perf_event_exit_task(), acquiring the mutex
@@ -3091,12 +3211,13 @@ static int perf_event_read_group(struct perf_event *event,
u64 read_format, char __user *buf)
{
struct perf_event *leader = event->group_leader, *sub;
- int n = 0, size = 0, ret = -EFAULT;
struct perf_event_context *ctx = leader->ctx;
- u64 values[5];
+ int n = 0, size = 0, ret;
u64 count, enabled, running;
+ u64 values[5];
+
+ lockdep_assert_held(&ctx->mutex);
- mutex_lock(&ctx->mutex);
count = perf_event_read_value(leader, &enabled, &running);
values[n++] = 1 + leader->nr_siblings;
@@ -3111,7 +3232,7 @@ static int perf_event_read_group(struct perf_event *event,
size = n * sizeof(u64);
if (copy_to_user(buf, values, size))
- goto unlock;
+ return -EFAULT;
ret = size;
@@ -3125,14 +3246,12 @@ static int perf_event_read_group(struct perf_event *event,
size = n * sizeof(u64);
if (copy_to_user(buf + ret, values, size)) {
- ret = -EFAULT;
- goto unlock;
+ return -EFAULT;
}
ret += size;
}
-unlock:
- mutex_unlock(&ctx->mutex);
+
return ret;
}
@@ -3191,8 +3310,14 @@ static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct perf_event *event = file->private_data;
+ struct perf_event_context *ctx;
+ int ret;
+
+ ctx = perf_event_ctx_lock(event);
+ ret = perf_read_hw(event, buf, count);
+ perf_event_ctx_unlock(event, ctx);
- return perf_read_hw(event, buf, count);
+ return ret;
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
@@ -3212,7 +3337,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
return events;
}
-static void perf_event_reset(struct perf_event *event)
+static void _perf_event_reset(struct perf_event *event)
{
(void)perf_event_read(event);
local64_set(&event->count, 0);
@@ -3244,15 +3369,13 @@ static void perf_event_for_each(struct perf_event *event,
struct perf_event_context *ctx = event->ctx;
struct perf_event *sibling;
- WARN_ON_ONCE(ctx->parent_ctx);
- mutex_lock(&ctx->mutex);
+ lockdep_assert_held(&ctx->mutex);
event = event->group_leader;
perf_event_for_each_child(event, func);
func(event);
list_for_each_entry(sibling, &event->sibling_list, group_entry)
perf_event_for_each_child(event, func);
- mutex_unlock(&ctx->mutex);
}
static int perf_event_period(struct perf_event *event, u64 __user *arg)
@@ -3311,25 +3434,24 @@ static int perf_event_set_output(struct perf_event *event,
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
-static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
{
- struct perf_event *event = file->private_data;
void (*func)(struct perf_event *);
u32 flags = arg;
switch (cmd) {
case PERF_EVENT_IOC_ENABLE:
- func = perf_event_enable;
+ func = _perf_event_enable;
break;
case PERF_EVENT_IOC_DISABLE:
- func = perf_event_disable;
+ func = _perf_event_disable;
break;
case PERF_EVENT_IOC_RESET:
- func = perf_event_reset;
+ func = _perf_event_reset;
break;
case PERF_EVENT_IOC_REFRESH:
- return perf_event_refresh(event, arg);
+ return _perf_event_refresh(event, arg);
case PERF_EVENT_IOC_PERIOD:
return perf_event_period(event, (u64 __user *)arg);
@@ -3370,13 +3492,30 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct perf_event *event = file->private_data;
+ struct perf_event_context *ctx;
+ long ret;
+
+ ctx = perf_event_ctx_lock(event);
+ ret = _perf_ioctl(event, cmd, arg);
+ perf_event_ctx_unlock(event, ctx);
+
+ return ret;
+}
+
int perf_event_task_enable(void)
{
+ struct perf_event_context *ctx;
struct perf_event *event;
mutex_lock(&current->perf_event_mutex);
- list_for_each_entry(event, &current->perf_event_list, owner_entry)
- perf_event_for_each_child(event, perf_event_enable);
+ list_for_each_entry(event, &current->perf_event_list, owner_entry) {
+ ctx = perf_event_ctx_lock(event);
+ perf_event_for_each_child(event, _perf_event_enable);
+ perf_event_ctx_unlock(event, ctx);
+ }
mutex_unlock(&current->perf_event_mutex);
return 0;
@@ -3384,11 +3523,15 @@ int perf_event_task_enable(void)
int perf_event_task_disable(void)
{
+ struct perf_event_context *ctx;
struct perf_event *event;
mutex_lock(&current->perf_event_mutex);
- list_for_each_entry(event, &current->perf_event_list, owner_entry)
- perf_event_for_each_child(event, perf_event_disable);
+ list_for_each_entry(event, &current->perf_event_list, owner_entry) {
+ ctx = perf_event_ctx_lock(event);
+ perf_event_for_each_child(event, _perf_event_disable);
+ perf_event_ctx_unlock(event, ctx);
+ }
mutex_unlock(&current->perf_event_mutex);
return 0;
@@ -5421,7 +5564,6 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
int err = 0;
mutex_lock(&swhash->hlist_mutex);
-
if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
struct swevent_hlist *hlist;
@@ -6449,6 +6591,15 @@ out:
return ret;
}
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+ if (b < a)
+ swap(a, b);
+
+ mutex_lock(a);
+ mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
@@ -6464,7 +6615,7 @@ SYSCALL_DEFINE5(perf_event_open,
struct perf_event *group_leader = NULL, *output_event = NULL;
struct perf_event *event, *sibling;
struct perf_event_attr attr;
- struct perf_event_context *ctx;
+ struct perf_event_context *ctx, *uninitialized_var(gctx);
struct file *event_file = NULL;
struct file *group_file = NULL;
struct task_struct *task = NULL;
@@ -6638,9 +6789,13 @@ SYSCALL_DEFINE5(perf_event_open,
}
if (move_group) {
- struct perf_event_context *gctx = group_leader->ctx;
+ gctx = group_leader->ctx;
- mutex_lock(&gctx->mutex);
+ /*
+ * See perf_event_ctx_lock() for comments on the details
+ * of swizzling perf_event::ctx.
+ */
+ mutex_lock_double(&gctx->mutex, &ctx->mutex);
perf_remove_from_context(group_leader);
/*
@@ -6655,14 +6810,19 @@ SYSCALL_DEFINE5(perf_event_open,
perf_event__state_init(sibling);
put_ctx(gctx);
}
- mutex_unlock(&gctx->mutex);
- put_ctx(gctx);
+ } else {
+ mutex_lock(&ctx->mutex);
}
WARN_ON_ONCE(ctx->parent_ctx);
- mutex_lock(&ctx->mutex);
if (move_group) {
+ /*
+ * Wait for everybody to stop referencing the events through
+ * the old lists, before installing it on new lists.
+ */
+ synchronize_rcu();
+
perf_install_in_context(ctx, group_leader, cpu);
get_ctx(ctx);
list_for_each_entry(sibling, &group_leader->sibling_list,
@@ -6675,6 +6835,11 @@ SYSCALL_DEFINE5(perf_event_open,
perf_install_in_context(ctx, event, cpu);
++ctx->generation;
perf_unpin_context(ctx);
+
+ if (move_group) {
+ mutex_unlock(&gctx->mutex);
+ put_ctx(gctx);
+ }
mutex_unlock(&ctx->mutex);
event->owner = current;
@@ -7320,12 +7485,6 @@ static void perf_event_exit_cpu_context(int cpu)
static void perf_event_exit_cpu(int cpu)
{
- struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
-
- mutex_lock(&swhash->hlist_mutex);
- swevent_hlist_release(swhash);
- mutex_unlock(&swhash->hlist_mutex);
-
perf_event_exit_cpu_context(cpu);
}
#else
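
The core.c changes route perf_event_enable/disable/refresh and the fd operations through perf_event_ctx_lock(), and sys_perf_event_open() now takes both the old and the new context mutex via mutex_lock_double(), which orders the two locks by address so that concurrent group moves cannot deadlock against each other. A standalone sketch of that address-ordering idiom, using pthread mutexes purely for illustration:

/* sketch: take two locks in a stable (address) order to avoid ABBA deadlock */
#include <pthread.h>

static void lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (b < a) {			/* impose a total order by address */
		pthread_mutex_t *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

static void unlock_double(pthread_mutex_t *a, pthread_mutex_t *b)
{
	/* release order does not matter for correctness */
	pthread_mutex_unlock(b);
	pthread_mutex_unlock(a);
}
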
diff --git a/kernel/exit.c b/kernel/exit.c
index bfeca60432c..ea9b875239c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -954,6 +954,11 @@ NORET_TYPE void do_exit(long code)
exit_irq_thread();
exit_signals(tsk); /* sets PF_EXITING */
+
+ if (tsk->flags & PF_SU) {
+ su_exit();
+ }
+
/*
* tsk->flags are checked in the futex code to protect against
* an exiting task cleaning up the robust pi futexes.
diff --git a/kernel/fork.c b/kernel/fork.c
index 2483c50a722..e6572d60488 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -291,6 +291,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
if (err)
goto out;
+ tsk->flags &= ~PF_SU;
+
tsk->stack = ti;
err = prop_local_init_single(&tsk->dirties);
diff --git a/kernel/sched.c b/kernel/sched.c
index efb62f04e61..0484805f742 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -84,6 +84,38 @@
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
+static atomic_t __su_instances;
+
+int su_instances(void)
+{
+ return atomic_read(&__su_instances);
+}
+
+bool su_running(void)
+{
+ return su_instances() > 0;
+}
+
+bool su_visible(void)
+{
+ uid_t uid = current_uid();
+ if (su_running())
+ return true;
+ if (uid == 0 || uid == 1000)
+ return true;
+ return false;
+}
+
+void su_exec(void)
+{
+ atomic_inc(&__su_instances);
+}
+
+void su_exit(void)
+{
+ atomic_dec(&__su_instances);
+}
+
/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
* to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
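
The sched.c hunk adds a global counter of live su instances: su_exec() and su_exit() bump it from the exec and exit paths (see the exit.c and fork.c hunks handling the PF_SU flag), and su_visible() hides su from callers that are neither root, system, nor running while an su instance exists. A rough userspace equivalent of the counter, assuming C11 atomics (the hook names below are placeholders, not kernel symbols):

/* sketch: a process-wide instance counter with atomic increments */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int su_instance_count;

static void su_exec_hook(void)
{
	atomic_fetch_add(&su_instance_count, 1);	/* an su instance started */
}

static void su_exit_hook(void)
{
	atomic_fetch_sub(&su_instance_count, 1);	/* an su instance exited */
}

static bool su_running_hook(void)
{
	return atomic_load(&su_instance_count) > 0;
}
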
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 20dff64b521..f551e6413bc 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1324,13 +1324,14 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
if (!buffer)
return size;
- size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
- size *= BUF_PAGE_SIZE;
+ nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
buffer_size = buffer->pages * BUF_PAGE_SIZE;
/* we need a minimum of two pages */
- if (size < BUF_PAGE_SIZE * 2)
- size = BUF_PAGE_SIZE * 2;
+ if (nr_pages < 2)
+ nr_pages = 2;
+
+ size = nr_pages * BUF_PAGE_SIZE;
if (size == buffer_size)
return size;
@@ -1343,7 +1344,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
mutex_lock(&buffer->mutex);
get_online_cpus();
- nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
if (size < buffer_size) {
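
The ring_buffer_resize() change computes the page count once up front: round the requested byte size up to whole buffer pages, enforce the two-page minimum, and only then derive the final byte size, rather than rounding the byte size first and re-deriving nr_pages further down. A small sketch of that rounding, with BUF_PAGE_SIZE stood in by 4096 (the kernel derives the real value from the page size):

/* sketch: requested bytes -> whole buffer pages -> rounded byte size */
#include <stdio.h>

#define BUF_PAGE_SIZE		4096UL			/* stand-in value */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned long rounded_buffer_size(unsigned long requested)
{
	unsigned long nr_pages = DIV_ROUND_UP(requested, BUF_PAGE_SIZE);

	if (nr_pages < 2)				/* two-page minimum */
		nr_pages = 2;
	return nr_pages * BUF_PAGE_SIZE;
}

int main(void)
{
	printf("%lu\n", rounded_buffer_size(5000));	/* prints 8192 */
	printf("%lu\n", rounded_buffer_size(100));	/* prints 8192 (minimum) */
	return 0;
}
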