path: root/kernel/bpf
author     Daniel Borkmann <daniel@iogearbox.net>  2018-05-04 23:41:05 +0200
committer  Daniel Borkmann <daniel@iogearbox.net>  2018-05-04 23:41:06 +0200
commit     a5458aa923be8960a78d6fdfa1c6ff769b34deb2
tree       1e14a76c6a39662d602bf9589a31245ca9d74cdd  /kernel/bpf
parent     c27638c0628a5507e421f325dae3d3c9a45f227e
parent     ab7f5bf0928be2f148d000a6eaa6c0a36e74750e
Merge branch 'bpf-event-output-offload'
Jakub Kicinski says:

====================
This series centres on NFP offload of bpf_event_output(). The first
patch allows perf event arrays to be used by offloaded programs. The
next patch makes the nfp driver keep track of such arrays so it can
filter FW events referring to maps. Perf event arrays are not device
bound; having the driver reimplement and manage the perf array seems
brittle and unnecessary.

Patch 4 slightly moves the verifier step which replaces map fds with
map pointers. This is useful for the nfp JIT since we can then easily
replace host pointers with NFP table ids (patch 6). This allows us to
lift the limitation that map helpers have to be used with the same map
pointer on all paths. The second use of replacing fds with real host
map pointers is that the host map pointer can serve as the key for FW
events in perf event array offload.

Patch 5 adds perf event output offload support for the NFP. There are
some differences between the offloaded and non-offloaded versions of
bpf_event_output(): the FW messages which carry events may get dropped
and reordered relatively easily, and the return codes from the helper
are not guaranteed to match the host. Users are warned about some of
those discrepancies with a one-time warning message in the kernel logs.

bpftool gains the ability to dump perf ring events in a very simple
format. This was very useful for testing and simple debugging; maybe
it will be useful to others?

The last patch is a trivial comment fix.
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
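For orientation, the sketch below shows the kind of program the series is
about. It is hypothetical and not taken from this series: a minimal XDP
program feeding a BPF_MAP_TYPE_PERF_EVENT_ARRAY, i.e. exercising the
bpf_event_output() path that the NFP can now offload. The map name, event
layout, max_entries value and the selftests-style bpf_helpers.h are all
assumptions made for the example.

/* Hypothetical example -- not part of this series. */
#include <linux/bpf.h>
#include "bpf_helpers.h"	/* SEC(), struct bpf_map_def, bpf_perf_event_output() */

struct bpf_map_def SEC("maps") events = {
	.type		= BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size	= sizeof(int),
	.value_size	= sizeof(int),
	.max_entries	= 64,	/* assumed; at least the number of possible CPUs */
};

struct event {
	__u32 ifindex;
};

SEC("xdp")
int xdp_report(struct xdp_md *ctx)
{
	struct event e = { .ifindex = ctx->ingress_ifindex };

	/* On the host this call lands in bpf_event_output(); when the
	 * program is offloaded to the NFP the event is delivered via an
	 * FW message instead, with the drop/reorder/return-code caveats
	 * described above.
	 */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

From user space the ring is consumed with the usual perf buffer readers;
per this series, bpftool can also dump the events in a simple text format.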
Diffstat (limited to 'kernel/bpf')
-rw-r--r--  kernel/bpf/core.c      |  1
-rw-r--r--  kernel/bpf/offload.c   |  6
-rw-r--r--  kernel/bpf/syscall.c   |  2
-rw-r--r--  kernel/bpf/verifier.c  | 14
4 files changed, 14 insertions(+), 9 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1127552c8033..d0d7d9462368 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1719,6 +1719,7 @@ bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 {
 	return -ENOTSUPP;
 }
+EXPORT_SYMBOL_GPL(bpf_event_output);
 
 /* Always built-in helper functions. */
 const struct bpf_func_proto bpf_tail_call_proto = {
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index c9401075b58c..ac747d5cf7c6 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
  *
  * This software is licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -474,8 +474,10 @@ bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map)
 	struct bpf_prog_offload *offload;
 	bool ret;
 
-	if (!bpf_prog_is_dev_bound(prog->aux) || !bpf_map_is_dev_bound(map))
+	if (!bpf_prog_is_dev_bound(prog->aux))
 		return false;
+	if (!bpf_map_is_dev_bound(map))
+		return bpf_map_offload_neutral(map);
 
 	down_read(&bpf_devs_lock);
 	offload = prog->aux->offload;
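For reference, bpf_map_offload_neutral() is introduced earlier in this
series in include/linux/bpf.h; based on the series description it is
expected to simply single out perf event arrays as the one map type that
is neither host- nor device-bound, roughly as sketched here:

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	/* Perf event arrays are not tied to a device, so device-bound
	 * programs may use them (sketch, not the verbatim kernel code).
	 */
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}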
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 263e13ede029..9b87198deea2 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -282,6 +282,7 @@ void bpf_map_put(struct bpf_map *map)
 {
 	__bpf_map_put(map, true);
 }
+EXPORT_SYMBOL_GPL(bpf_map_put);
 
 void bpf_map_put_with_uref(struct bpf_map *map)
 {
@@ -543,6 +544,7 @@ struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
 	atomic_inc(&map->usercnt);
 	return map;
 }
+EXPORT_SYMBOL_GPL(bpf_map_inc);
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
 {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 37e0affa515e..d5e1a6c4165d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5055,7 +5055,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
 			/* hold the map. If the program is rejected by verifier,
 			 * the map will be released by release_maps() or it
 			 * will be used by the valid program until it's unloaded
-			 * and all maps are released in free_bpf_prog_info()
+			 * and all maps are released in free_used_maps()
 			 */
 			map = bpf_map_inc(map, false);
 			if (IS_ERR(map)) {
@@ -5741,16 +5741,16 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
 		env->strict_alignment = true;
 
+	ret = replace_map_fd_with_map_ptr(env);
+	if (ret < 0)
+		goto skip_full_check;
+
 	if (bpf_prog_is_dev_bound(env->prog->aux)) {
 		ret = bpf_prog_offload_verifier_prep(env);
 		if (ret)
-			goto err_unlock;
+			goto skip_full_check;
 	}
 
-	ret = replace_map_fd_with_map_ptr(env);
-	if (ret < 0)
-		goto skip_full_check;
-
 	env->explored_states = kcalloc(env->prog->len,
 				       sizeof(struct bpf_verifier_state_list *),
 				       GFP_USER);
@@ -5821,7 +5821,7 @@ skip_full_check:
 err_release_maps:
 	if (!env->prog->aux->used_maps)
 		/* if we didn't copy map pointers into bpf_prog_info, release
-		 * them now. Otherwise free_bpf_prog_info() will release them.
+		 * them now. Otherwise free_used_maps() will release them.
 		 */
 		release_maps(env);
 	*prog = env->prog;