author     Linus Torvalds <torvalds@linux-foundation.org>  2018-06-12 18:28:00 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-06-12 18:28:00 -0700
commit     b08fc5277aaa1d8ea15470d38bf36f19dfb0e125 (patch)
tree       1910dc474cb1ede95581dd9faa81a3bebeded0dc /kernel
parent     4597fcff07044d89c646d0c5d8b42cd976d966a1 (diff)
parent     9d2a789c1db75d0f55b14fa57bec548d94332ad8 (diff)
Merge tag 'overflow-v4.18-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull more overflow updates from Kees Cook:
 "The rest of the overflow changes for v4.18-rc1.

  This includes the explicit overflow fixes from Silvio, further
  struct_size() conversions from Matthew, and a bug fix from Dan.

  But the bulk of it is the treewide conversions to use either the
  2-factor argument allocators (e.g. kmalloc(a * b, ...) into
  kmalloc_array(a, b, ...)) or the array_size() macros (e.g.
  vmalloc(a * b) into vmalloc(array_size(a, b))).

  Coccinelle was fighting me on several fronts, so I've done a bunch of
  manual whitespace updates in the patches as well.

  Summary:

   - Error path bug fix for overflow tests (Dan)

   - Additional struct_size() conversions (Matthew, Kees)

   - Explicitly reported overflow fixes (Silvio, Kees)

   - Add missing kvcalloc() function (Kees)

   - Treewide conversions of allocators to use either the 2-factor
     argument variant when available, or array_size() and array3_size()
     as needed (Kees)"

* tag 'overflow-v4.18-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (26 commits)
  treewide: Use array_size in f2fs_kvzalloc()
  treewide: Use array_size() in f2fs_kzalloc()
  treewide: Use array_size() in f2fs_kmalloc()
  treewide: Use array_size() in sock_kmalloc()
  treewide: Use array_size() in kvzalloc_node()
  treewide: Use array_size() in vzalloc_node()
  treewide: Use array_size() in vzalloc()
  treewide: Use array_size() in vmalloc()
  treewide: devm_kzalloc() -> devm_kcalloc()
  treewide: devm_kmalloc() -> devm_kmalloc_array()
  treewide: kvzalloc() -> kvcalloc()
  treewide: kvmalloc() -> kvmalloc_array()
  treewide: kzalloc_node() -> kcalloc_node()
  treewide: kzalloc() -> kcalloc()
  treewide: kmalloc() -> kmalloc_array()
  mm: Introduce kvcalloc()
  video: uvesafb: Fix integer overflow in allocation
  UBIFS: Fix potential integer overflow in allocation
  leds: Use struct_size() in allocation
  Convert intel uncore to struct_size
  ...
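The conversions above replace open-coded size multiplications with calls that
detect overflow before allocating. As a hedged illustration only -- a
simplified userspace sketch, not the kernel implementation; the helper
alloc_array_checked() is a hypothetical stand-in for what kmalloc_array() and
array_size() do in-kernel:

/*
 * Why kmalloc(n * size) is dangerous: the multiplication can wrap and
 * silently under-allocate. The kernel's 2-factor allocators perform a
 * checked multiply instead; this sketch mimics that check in userspace.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical userspace analogue of kmalloc_array(): NULL on overflow. */
static void *alloc_array_checked(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;            /* product would wrap: refuse */
	return malloc(n * size);
}

int main(void)
{
	size_t n = SIZE_MAX / 8 + 2;    /* pathological element count */

	/* Unchecked: n * 8 wraps to a tiny value and malloc() "succeeds". */
	printf("unchecked request wraps to %zu bytes\n", n * 8);

	/* Checked: rejected, as kmalloc_array() would return NULL. */
	if (!alloc_array_checked(n, 8))
		printf("checked allocation rejected (overflow detected)\n");
	return 0;
}

In-kernel, array_size() additionally saturates to SIZE_MAX so the allocator
itself fails the request, and struct_size() applies the same idea to
"header plus flexible array member" layouts.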
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/btf.c                     8
-rw-r--r--  kernel/bpf/lpm_trie.c                5
-rw-r--r--  kernel/bpf/verifier.c               10
-rw-r--r--  kernel/cgroup/cgroup-v1.c            4
-rw-r--r--  kernel/cgroup/cpuset.c               5
-rw-r--r--  kernel/debug/kdb/kdb_main.c         13
-rw-r--r--  kernel/events/ring_buffer.c          3
-rw-r--r--  kernel/events/uprobes.c              3
-rw-r--r--  kernel/fail_function.c               2
-rw-r--r--  kernel/kexec_file.c                  2
-rw-r--r--  kernel/locking/locktorture.c        14
-rw-r--r--  kernel/power/swap.c                  6
-rw-r--r--  kernel/rcu/rcutorture.c              5
-rw-r--r--  kernel/relay.c                       3
-rw-r--r--  kernel/sched/fair.c                  4
-rw-r--r--  kernel/sched/rt.c                    4
-rw-r--r--  kernel/sched/topology.c              2
-rw-r--r--  kernel/sysctl.c                      3
-rw-r--r--  kernel/trace/ftrace.c               28
-rw-r--r--  kernel/trace/trace.c                12
-rw-r--r--  kernel/trace/trace_events_filter.c   6
-rw-r--r--  kernel/trace/tracing_map.c           2
-rw-r--r--  kernel/user_namespace.c              5
-rw-r--r--  kernel/workqueue.c                   2
24 files changed, 87 insertions, 64 deletions
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 8653ab004c73..2d49d18b793a 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -608,7 +608,7 @@ static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
new_size = min_t(u32, BTF_MAX_TYPE,
btf->types_size + expand_by);
- new_types = kvzalloc(new_size * sizeof(*new_types),
+ new_types = kvcalloc(new_size, sizeof(*new_types),
GFP_KERNEL | __GFP_NOWARN);
if (!new_types)
return -ENOMEM;
@@ -698,17 +698,17 @@ static int env_resolve_init(struct btf_verifier_env *env)
u8 *visit_states = NULL;
/* +1 for btf_void */
- resolved_sizes = kvzalloc((nr_types + 1) * sizeof(*resolved_sizes),
+ resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
GFP_KERNEL | __GFP_NOWARN);
if (!resolved_sizes)
goto nomem;
- resolved_ids = kvzalloc((nr_types + 1) * sizeof(*resolved_ids),
+ resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
GFP_KERNEL | __GFP_NOWARN);
if (!resolved_ids)
goto nomem;
- visit_states = kvzalloc((nr_types + 1) * sizeof(*visit_states),
+ visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
GFP_KERNEL | __GFP_NOWARN);
if (!visit_states)
goto nomem;
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index b4b5b81e7251..1603492c9cc7 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -623,8 +623,9 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
if (!key || key->prefixlen > trie->max_prefixlen)
goto find_leftmost;
- node_stack = kmalloc(trie->max_prefixlen * sizeof(struct lpm_trie_node *),
- GFP_ATOMIC | __GFP_NOWARN);
+ node_stack = kmalloc_array(trie->max_prefixlen,
+ sizeof(struct lpm_trie_node *),
+ GFP_ATOMIC | __GFP_NOWARN);
if (!node_stack)
return -ENOMEM;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index cced0c1e63e2..9e2bf834f13a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5206,7 +5206,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
if (cnt == 1)
return 0;
- new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
+ new_data = vzalloc(array_size(prog_len,
+ sizeof(struct bpf_insn_aux_data)));
if (!new_data)
return -ENOMEM;
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
@@ -5447,7 +5448,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
insn->imm = 1;
}
- func = kzalloc(sizeof(prog) * env->subprog_cnt, GFP_KERNEL);
+ func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
if (!func)
return -ENOMEM;
@@ -5870,8 +5871,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
return -ENOMEM;
log = &env->log;
- env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
- (*prog)->len);
+ env->insn_aux_data =
+ vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
+ (*prog)->len));
ret = -ENOMEM;
if (!env->insn_aux_data)
goto err_free_env;
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index e06c97f3ed1a..8b4f0768efd6 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -195,9 +195,9 @@ struct cgroup_pidlist {
static void *pidlist_allocate(int count)
{
if (PIDLIST_TOO_LARGE(count))
- return vmalloc(count * sizeof(pid_t));
+ return vmalloc(array_size(count, sizeof(pid_t)));
else
- return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
+ return kmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
}
static void pidlist_free(void *p)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index b42037e6e81d..d8b12e0d39cd 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -683,7 +683,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
goto done;
}
- csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
+ csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
if (!csa)
goto done;
csn = 0;
@@ -753,7 +753,8 @@ restart:
* The rest of the code, including the scheduler, can deal with
* dattr==NULL case. No need to abort if alloc fails.
*/
- dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
+ dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
+ GFP_KERNEL);
for (nslot = 0, i = 0; i < csn; i++) {
struct cpuset *a = csa[i];
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index e405677ee08d..2ddfce8f1e8f 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -691,7 +691,7 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
}
if (!s->usable)
return KDB_NOTIMP;
- s->command = kzalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
+ s->command = kcalloc(s->count + 1, sizeof(*(s->command)), GFP_KDB);
if (!s->command) {
kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
cmdstr);
@@ -729,8 +729,8 @@ static int kdb_defcmd(int argc, const char **argv)
kdb_printf("Command only available during kdb_init()\n");
return KDB_NOTIMP;
}
- defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
- GFP_KDB);
+ defcmd_set = kmalloc_array(defcmd_set_count + 1, sizeof(*defcmd_set),
+ GFP_KDB);
if (!defcmd_set)
goto fail_defcmd;
memcpy(defcmd_set, save_defcmd_set,
@@ -2706,8 +2706,11 @@ int kdb_register_flags(char *cmd,
}
if (i >= kdb_max_commands) {
- kdbtab_t *new = kmalloc((kdb_max_commands - KDB_BASE_CMD_MAX +
- kdb_command_extend) * sizeof(*new), GFP_KDB);
+ kdbtab_t *new = kmalloc_array(kdb_max_commands -
+ KDB_BASE_CMD_MAX +
+ kdb_command_extend,
+ sizeof(*new),
+ GFP_KDB);
if (!new) {
kdb_printf("Could not allocate new kdb_command "
"table\n");
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 1d8ca9ea9979..045a37e9ddee 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -614,7 +614,8 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
}
}
- rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
+ rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
+ node);
if (!rb->aux_pages)
return -ENOMEM;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 1725b902983f..ccc579a7d32e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1184,7 +1184,8 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
if (unlikely(!area))
goto out;
- area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
+ area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
+ GFP_KERNEL);
if (!area->bitmap)
goto free_area;
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
index 1d5632d8bbcc..5349c91c2298 100644
--- a/kernel/fail_function.c
+++ b/kernel/fail_function.c
@@ -258,7 +258,7 @@ static ssize_t fei_write(struct file *file, const char __user *buffer,
/* cut off if it is too long */
if (count > KSYM_NAME_LEN)
count = KSYM_NAME_LEN;
- buf = kmalloc(sizeof(char) * (count + 1), GFP_KERNEL);
+ buf = kmalloc(count + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 75d8e7cf040e..c6a3b6851372 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -793,7 +793,7 @@ static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
* The section headers in kexec_purgatory are read-only. In order to
* have them modifiable make a temporary copy.
*/
- sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
+ sechdrs = vzalloc(array_size(sizeof(Elf_Shdr), pi->ehdr->e_shnum));
if (!sechdrs)
return -ENOMEM;
memcpy(sechdrs, (void *)pi->ehdr + pi->ehdr->e_shoff,
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 6850ffd69125..8402b3349dca 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -913,7 +913,9 @@ static int __init lock_torture_init(void)
/* Initialize the statistics so that each run gets its own numbers. */
if (nwriters_stress) {
lock_is_write_held = 0;
- cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
+ cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
+ sizeof(*cxt.lwsa),
+ GFP_KERNEL);
if (cxt.lwsa == NULL) {
VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
firsterr = -ENOMEM;
@@ -942,7 +944,9 @@ static int __init lock_torture_init(void)
if (nreaders_stress) {
lock_is_read_held = 0;
- cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
+ cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
+ sizeof(*cxt.lrsa),
+ GFP_KERNEL);
if (cxt.lrsa == NULL) {
VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
firsterr = -ENOMEM;
@@ -985,7 +989,8 @@ static int __init lock_torture_init(void)
}
if (nwriters_stress) {
- writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
+ writer_tasks = kcalloc(cxt.nrealwriters_stress,
+ sizeof(writer_tasks[0]),
GFP_KERNEL);
if (writer_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
@@ -995,7 +1000,8 @@ static int __init lock_torture_init(void)
}
if (cxt.cur_ops->readlock) {
- reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
+ reader_tasks = kcalloc(cxt.nrealreaders_stress,
+ sizeof(reader_tasks[0]),
GFP_KERNEL);
if (reader_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 1efcb5b0c3ed..c2bcf97d24c8 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -698,7 +698,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
goto out_clean;
}
- data = vmalloc(sizeof(*data) * nr_threads);
+ data = vmalloc(array_size(nr_threads, sizeof(*data)));
if (!data) {
pr_err("Failed to allocate LZO data\n");
ret = -ENOMEM;
@@ -1183,14 +1183,14 @@ static int load_image_lzo(struct swap_map_handle *handle,
nr_threads = num_online_cpus() - 1;
nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
- page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
+ page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
if (!page) {
pr_err("Failed to allocate LZO page\n");
ret = -ENOMEM;
goto out_clean;
}
- data = vmalloc(sizeof(*data) * nr_threads);
+ data = vmalloc(array_size(nr_threads, sizeof(*data)));
if (!data) {
pr_err("Failed to allocate LZO data\n");
ret = -ENOMEM;
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index e628fcfd1bde..42fcb7f05fac 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -831,8 +831,9 @@ rcu_torture_cbflood(void *arg)
cbflood_intra_holdoff > 0 &&
cur_ops->call &&
cur_ops->cb_barrier) {
- rhp = vmalloc(sizeof(*rhp) *
- cbflood_n_burst * cbflood_n_per_burst);
+ rhp = vmalloc(array3_size(cbflood_n_burst,
+ cbflood_n_per_burst,
+ sizeof(*rhp)));
err = !rhp;
}
if (err) {
diff --git a/kernel/relay.c b/kernel/relay.c
index c955b10c973c..9f5326e8a036 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -169,7 +169,8 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
if (!buf)
return NULL;
- buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
+ buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t *),
+ GFP_KERNEL);
if (!buf->padding)
goto free_buf;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e497c05aab7f..1866e64792a7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10215,10 +10215,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
struct cfs_rq *cfs_rq;
int i;
- tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
+ tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
if (!tg->cfs_rq)
goto err;
- tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
+ tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
if (!tg->se)
goto err;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ef3c4e6f5345..47556b0c9a95 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -183,10 +183,10 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
struct sched_rt_entity *rt_se;
int i;
- tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
+ tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
if (!tg->rt_rq)
goto err;
- tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
+ tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
if (!tg->rt_se)
goto err;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 61a1125c1ae4..05a831427bc7 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1750,7 +1750,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
int i;
cpumask_var_t *doms;
- doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
+ doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
if (!doms)
return NULL;
for (i = 0; i < ndoms; i++) {
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6a78cf70761d..2d9837c0aff4 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -3047,7 +3047,8 @@ int proc_do_large_bitmap(struct ctl_table *table, int write,
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
- tmp_bitmap = kzalloc(BITS_TO_LONGS(bitmap_len) * sizeof(unsigned long),
+ tmp_bitmap = kcalloc(BITS_TO_LONGS(bitmap_len),
+ sizeof(unsigned long),
GFP_KERNEL);
if (!tmp_bitmap) {
kfree(kbuf);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8d83bcf9ef69..efed9c1cfb7e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -728,7 +728,7 @@ static int ftrace_profile_init_cpu(int cpu)
*/
size = FTRACE_PROFILE_HASH_SIZE;
- stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
+ stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
if (!stat->hash)
return -ENOMEM;
@@ -6830,9 +6830,10 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
struct task_struct *g, *t;
for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
- ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
- * sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
+ ret_stack_list[i] =
+ kmalloc_array(FTRACE_RETFUNC_DEPTH,
+ sizeof(struct ftrace_ret_stack),
+ GFP_KERNEL);
if (!ret_stack_list[i]) {
start = 0;
end = i;
@@ -6904,9 +6905,9 @@ static int start_graph_tracing(void)
struct ftrace_ret_stack **ret_stack_list;
int ret, cpu;
- ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
- sizeof(struct ftrace_ret_stack *),
- GFP_KERNEL);
+ ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
+ sizeof(struct ftrace_ret_stack *),
+ GFP_KERNEL);
if (!ret_stack_list)
return -ENOMEM;
@@ -7088,9 +7089,10 @@ void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
ret_stack = per_cpu(idle_ret_stack, cpu);
if (!ret_stack) {
- ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
- * sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
+ ret_stack =
+ kmalloc_array(FTRACE_RETFUNC_DEPTH,
+ sizeof(struct ftrace_ret_stack),
+ GFP_KERNEL);
if (!ret_stack)
return;
per_cpu(idle_ret_stack, cpu) = ret_stack;
@@ -7109,9 +7111,9 @@ void ftrace_graph_init_task(struct task_struct *t)
if (ftrace_graph_active) {
struct ftrace_ret_stack *ret_stack;
- ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
- * sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
+ ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
+ sizeof(struct ftrace_ret_stack),
+ GFP_KERNEL);
if (!ret_stack)
return;
graph_init_task(t, ret_stack);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 108ce3e1dc13..c9336e98ac59 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1751,12 +1751,13 @@ static inline void set_cmdline(int idx, const char *cmdline)
static int allocate_cmdlines_buffer(unsigned int val,
struct saved_cmdlines_buffer *s)
{
- s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
- GFP_KERNEL);
+ s->map_cmdline_to_pid = kmalloc_array(val,
+ sizeof(*s->map_cmdline_to_pid),
+ GFP_KERNEL);
if (!s->map_cmdline_to_pid)
return -ENOMEM;
- s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
+ s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
if (!s->saved_cmdlines) {
kfree(s->map_cmdline_to_pid);
return -ENOMEM;
@@ -4360,7 +4361,8 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
if (mask == TRACE_ITER_RECORD_TGID) {
if (!tgid_map)
- tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
+ tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
+ sizeof(*tgid_map),
GFP_KERNEL);
if (!tgid_map) {
tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
@@ -5063,7 +5065,7 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
* where the head holds the module and length of array, and the
* tail holds a pointer to the next list.
*/
- map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
+ map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
if (!map_array) {
pr_warn("Unable to allocate trace eval mapping\n");
return;
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0171407d231f..e1c818dbc0d7 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -436,15 +436,15 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
nr_preds += 2; /* For TRUE and FALSE */
- op_stack = kmalloc(sizeof(*op_stack) * nr_parens, GFP_KERNEL);
+ op_stack = kmalloc_array(nr_parens, sizeof(*op_stack), GFP_KERNEL);
if (!op_stack)
return ERR_PTR(-ENOMEM);
- prog_stack = kmalloc(sizeof(*prog_stack) * nr_preds, GFP_KERNEL);
+ prog_stack = kmalloc_array(nr_preds, sizeof(*prog_stack), GFP_KERNEL);
if (!prog_stack) {
parse_error(pe, -ENOMEM, 0);
goto out_free;
}
- inverts = kmalloc(sizeof(*inverts) * nr_preds, GFP_KERNEL);
+ inverts = kmalloc_array(nr_preds, sizeof(*inverts), GFP_KERNEL);
if (!inverts) {
parse_error(pe, -ENOMEM, 0);
goto out_free;
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 5cadb1b8b5fe..752d8042bad4 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -1075,7 +1075,7 @@ int tracing_map_sort_entries(struct tracing_map *map,
struct tracing_map_sort_entry *sort_entry, **entries;
int i, n_entries, ret;
- entries = vmalloc(map->max_elts * sizeof(sort_entry));
+ entries = vmalloc(array_size(sizeof(sort_entry), map->max_elts));
if (!entries)
return -ENOMEM;
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 492c255e6c5a..c3d7583fcd21 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -764,8 +764,9 @@ static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
struct uid_gid_extent *forward;
/* Allocate memory for 340 mappings. */
- forward = kmalloc(sizeof(struct uid_gid_extent) *
- UID_GID_MAP_MAX_EXTENTS, GFP_KERNEL);
+ forward = kmalloc_array(UID_GID_MAP_MAX_EXTENTS,
+ sizeof(struct uid_gid_extent),
+ GFP_KERNEL);
if (!forward)
return -ENOMEM;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 465a28b4cd32..78b192071ef7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5638,7 +5638,7 @@ static void __init wq_numa_init(void)
* available. Build one from cpu_to_node() which should have been
* fully initialized by now.
*/
- tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
+ tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
BUG_ON(!tbl);
for_each_node(node)