Diffstat (limited to 'kernel')
 kernel/bpf/btf.c                                              |  16
 kernel/bpf/cpumask.c                                          |  63
 kernel/bpf/devmap.c                                           |  16
 kernel/bpf/helpers.c                                          |  38
 kernel/bpf/offload.c                                          |   2
 kernel/bpf/preload/bpf_preload_kern.c                         |   6
 kernel/bpf/preload/iterators/Makefile                         |  12
 kernel/bpf/preload/iterators/README                           |   5
 kernel/bpf/preload/iterators/iterators.lskel-big-endian.h     | 419
 kernel/bpf/preload/iterators/iterators.lskel-little-endian.h (renamed from kernel/bpf/preload/iterators/iterators.lskel.h) | 0
 kernel/bpf/syscall.c                                          |  23
 kernel/cgroup/rstat.c                                         |   4
 kernel/kexec_core.c                                           |   3
 kernel/trace/bpf_trace.c                                      |   8
 14 files changed, 530 insertions(+), 85 deletions(-)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 5b3cb8068b4d..740bdb045b14 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -6453,6 +6453,18 @@ static int __get_type_size(struct btf *btf, u32 btf_id,
 	return -EINVAL;
 }
 
+static u8 __get_type_fmodel_flags(const struct btf_type *t)
+{
+	u8 flags = 0;
+
+	if (__btf_type_is_struct(t))
+		flags |= BTF_FMODEL_STRUCT_ARG;
+	if (btf_type_is_signed_int(t))
+		flags |= BTF_FMODEL_SIGNED_ARG;
+
+	return flags;
+}
+
 int btf_distill_func_proto(struct bpf_verifier_log *log,
 			   struct btf *btf,
 			   const struct btf_type *func,
@@ -6473,6 +6485,7 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
 			m->arg_flags[i] = 0;
 		}
 		m->ret_size = 8;
+		m->ret_flags = 0;
 		m->nr_args = MAX_BPF_FUNC_REG_ARGS;
 		return 0;
 	}
@@ -6492,6 +6505,7 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
 		return -EINVAL;
 	}
 	m->ret_size = ret;
+	m->ret_flags = __get_type_fmodel_flags(t);
 
 	for (i = 0; i < nargs; i++) {
 		if (i == nargs - 1 && args[i].type == 0) {
@@ -6516,7 +6530,7 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
 			return -EINVAL;
 		}
 		m->arg_size[i] = ret;
-		m->arg_flags[i] = __btf_type_is_struct(t) ? BTF_FMODEL_STRUCT_ARG : 0;
+		m->arg_flags[i] = __get_type_fmodel_flags(t);
 	}
 	m->nr_args = nargs;
 	return 0;
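
With this change, the distilled btf_func_model records signedness for the return value as well as for arguments. A minimal sketch of how an arch trampoline generator might consume the new ret_flags field; struct my_jit_ctx and my_emit_sign_extend() are hypothetical stand-ins for arch-specific codegen, not part of this diff:

/* Hypothetical consumer of ret_flags: widen a narrow signed return
 * value before handing it back to the caller. Illustrative only.
 */
static void my_emit_ret_fixup(struct my_jit_ctx *ctx,
			      const struct btf_func_model *m)
{
	/* BTF_FMODEL_SIGNED_ARG on ret_flags means the traced function
	 * returns a signed integer narrower than a register.
	 */
	if ((m->ret_flags & BTF_FMODEL_SIGNED_ARG) && m->ret_size < 8)
		my_emit_sign_extend(ctx, m->ret_size * 8);
}
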
diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
index 25355a0a367a..52b981512a35 100644
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -48,10 +48,13 @@ __diag_ignore_all("-Wmissing-prototypes",
  * bpf_cpumask_create() allocates memory using the BPF memory allocator, and
  * will not block. It may return NULL if no memory is available.
  */
-struct bpf_cpumask *bpf_cpumask_create(void)
+__bpf_kfunc struct bpf_cpumask *bpf_cpumask_create(void)
 {
 	struct bpf_cpumask *cpumask;
 
+	/* cpumask must be the first element so struct bpf_cpumask be cast to struct cpumask. */
+	BUILD_BUG_ON(offsetof(struct bpf_cpumask, cpumask) != 0);
+
 	cpumask = bpf_mem_alloc(&bpf_cpumask_ma, sizeof(*cpumask));
 	if (!cpumask)
 		return NULL;
@@ -71,7 +74,7 @@ struct bpf_cpumask *bpf_cpumask_create(void)
  * must either be embedded in a map as a kptr, or freed with
  * bpf_cpumask_release().
  */
-struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
+__bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
 {
 	refcount_inc(&cpumask->usage);
 	return cpumask;
@@ -87,7 +90,7 @@ struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
  * kptr, or freed with bpf_cpumask_release(). This function may return NULL if
  * no BPF cpumask was found in the specified map value.
  */
-struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
+__bpf_kfunc struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
 {
 	struct bpf_cpumask *cpumask;
 
@@ -113,7 +116,7 @@ struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
  * reference of the BPF cpumask has been released, it is subsequently freed in
  * an RCU callback in the BPF memory allocator.
  */
-void bpf_cpumask_release(struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
 {
 	if (!cpumask)
 		return;
@@ -132,7 +135,7 @@ void bpf_cpumask_release(struct bpf_cpumask *cpumask)
  * Find the index of the first nonzero bit of the cpumask. A struct bpf_cpumask
  * pointer may be safely passed to this function.
  */
-u32 bpf_cpumask_first(const struct cpumask *cpumask)
+__bpf_kfunc u32 bpf_cpumask_first(const struct cpumask *cpumask)
 {
 	return cpumask_first(cpumask);
 }
@@ -145,7 +148,7 @@ u32 bpf_cpumask_first(const struct cpumask *cpumask)
  * Find the index of the first unset bit of the cpumask. A struct bpf_cpumask
  * pointer may be safely passed to this function.
  */
-u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
+__bpf_kfunc u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
 {
 	return cpumask_first_zero(cpumask);
 }
@@ -155,7 +158,7 @@
  * @cpu: The CPU to be set in the cpumask.
  * @cpumask: The BPF cpumask in which a bit is being set.
  */
-void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 {
 	if (!cpu_valid(cpu))
 		return;
@@ -168,7 +171,7 @@ void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
  * @cpu: The CPU to be cleared from the cpumask.
  * @cpumask: The BPF cpumask in which a bit is being cleared.
 */
-void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 {
 	if (!cpu_valid(cpu))
 		return;
@@ -185,7 +188,7 @@ void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 * * true  - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is an invalid cpu.
 */
-bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
 {
 	if (!cpu_valid(cpu))
 		return false;
@@ -202,7 +205,7 @@ bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
 * * true  - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is invalid.
 */
-bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 {
 	if (!cpu_valid(cpu))
 		return false;
@@ -220,7 +223,7 @@ bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 * * true  - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is invalid.
 */
-bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 {
 	if (!cpu_valid(cpu))
 		return false;
@@ -232,7 +235,7 @@ bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 * bpf_cpumask_setall() - Set all of the bits in a BPF cpumask.
 * @cpumask: The BPF cpumask having all of its bits set.
 */
-void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
 {
 	cpumask_setall((struct cpumask *)cpumask);
 }
@@ -241,7 +244,7 @@ void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
 * bpf_cpumask_clear() - Clear all of the bits in a BPF cpumask.
 * @cpumask: The BPF cpumask being cleared.
 */
-void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
 {
 	cpumask_clear((struct cpumask *)cpumask);
 }
@@ -258,9 +261,9 @@ void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
-bool bpf_cpumask_and(struct bpf_cpumask *dst,
-		     const struct cpumask *src1,
-		     const struct cpumask *src2)
+__bpf_kfunc bool bpf_cpumask_and(struct bpf_cpumask *dst,
+				 const struct cpumask *src1,
+				 const struct cpumask *src2)
 {
 	return cpumask_and((struct cpumask *)dst, src1, src2);
 }
@@ -273,9 +276,9 @@ bool bpf_cpumask_and(struct bpf_cpumask *dst,
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
-void bpf_cpumask_or(struct bpf_cpumask *dst,
-		    const struct cpumask *src1,
-		    const struct cpumask *src2)
+__bpf_kfunc void bpf_cpumask_or(struct bpf_cpumask *dst,
+				const struct cpumask *src1,
+				const struct cpumask *src2)
 {
 	cpumask_or((struct cpumask *)dst, src1, src2);
 }
@@ -288,9 +291,9 @@ void bpf_cpumask_or(struct bpf_cpumask *dst,
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
-void bpf_cpumask_xor(struct bpf_cpumask *dst,
-		     const struct cpumask *src1,
-		     const struct cpumask *src2)
+__bpf_kfunc void bpf_cpumask_xor(struct bpf_cpumask *dst,
+				 const struct cpumask *src1,
+				 const struct cpumask *src2)
 {
 	cpumask_xor((struct cpumask *)dst, src1, src2);
 }
@@ -306,7 +309,7 @@ void bpf_cpumask_xor(struct bpf_cpumask *dst,
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
-bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
+__bpf_kfunc bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
 {
 	return cpumask_equal(src1, src2);
 }
@@ -322,7 +325,7 @@ bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
-bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
+__bpf_kfunc bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
 {
 	return cpumask_intersects(src1, src2);
 }
@@ -338,7 +341,7 @@ bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
-bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
+__bpf_kfunc bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
 {
 	return cpumask_subset(src1, src2);
 }
@@ -353,7 +356,7 @@ bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */
-bool bpf_cpumask_empty(const struct cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_empty(const struct cpumask *cpumask)
 {
 	return cpumask_empty(cpumask);
 }
@@ -368,7 +371,7 @@ bool bpf_cpumask_empty(const struct cpumask *cpumask)
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */
-bool bpf_cpumask_full(const struct cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_full(const struct cpumask *cpumask)
 {
 	return cpumask_full(cpumask);
 }
@@ -380,7 +383,7 @@ bool bpf_cpumask_full(const struct cpumask *cpumask)
 *
 * A struct bpf_cpumask pointer may be safely passed to @src.
 */
-void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
+__bpf_kfunc void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
 {
 	cpumask_copy((struct cpumask *)dst, src);
 }
@@ -395,7 +398,7 @@ void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
 *
 * A struct bpf_cpumask pointer may be safely passed to @src.
 */
-u32 bpf_cpumask_any(const struct cpumask *cpumask)
+__bpf_kfunc u32 bpf_cpumask_any(const struct cpumask *cpumask)
 {
 	return cpumask_any(cpumask);
 }
@@ -412,7 +415,7 @@ u32 bpf_cpumask_any(const struct cpumask *cpumask)
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
-u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
+__bpf_kfunc u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
 {
 	return cpumask_any_and(src1, src2);
 }
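
For context, a sketch of how a BPF program drives these kfuncs once they are registered; the __ksym declarations and the tracepoint hook mirror the pattern used by the cpumask selftests, and the usual header/license boilerplate is omitted:

/* BPF-program-side usage sketch (assumed __ksym declarations). */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(check_cpu0, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask;

	mask = bpf_cpumask_create();
	if (!mask)
		return 0;	/* allocation may fail; nothing to release */

	bpf_cpumask_set_cpu(0, mask);
	bpf_cpumask_test_cpu(0, (const struct cpumask *)mask);

	bpf_cpumask_release(mask);	/* acquired reference must be dropped */
	return 0;
}
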
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d01e4c55b376..2675fefc6cb6 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -474,7 +474,11 @@ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 {
 	int err;
 
-	if (!dev->netdev_ops->ndo_xdp_xmit)
+	if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
+		return -EOPNOTSUPP;
+
+	if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
+		     xdp_frame_has_frags(xdpf)))
 		return -EOPNOTSUPP;
 
 	err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
@@ -532,8 +536,14 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
 
 static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
 {
-	if (!obj ||
-	    !obj->dev->netdev_ops->ndo_xdp_xmit)
+	if (!obj)
+		return false;
+
+	if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
+		return false;
+
+	if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
+		     xdp_frame_has_frags(xdpf)))
 		return false;
 
 	if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
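
These hunks move the capability test from "does the driver implement ndo_xdp_xmit" to the advertised xdp_features bitmap. A sketch of the driver-side counterpart that would satisfy the new checks; my_driver_setup() is a hypothetical hook, while the NETDEV_XDP_ACT_* flags come from the same feature-advertisement series:

/* Hypothetical driver hook advertising the features consulted above.
 * Without NDO_XMIT, __xdp_enqueue() now fails with -EOPNOTSUPP;
 * without NDO_XMIT_SG, multi-frag (scatter-gather) frames are refused.
 */
static void my_driver_setup(struct net_device *dev)
{
	dev->xdp_features = NETDEV_XDP_ACT_BASIC |
			    NETDEV_XDP_ACT_REDIRECT |
			    NETDEV_XDP_ACT_NDO_XMIT |
			    NETDEV_XDP_ACT_NDO_XMIT_SG;
}
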
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 458db2db2f81..2dae44581922 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1776,7 +1776,7 @@ __diag_push();
 __diag_ignore_all("-Wmissing-prototypes",
 		  "Global functions as their definitions will be in vmlinux BTF");
 
-void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
+__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
 {
 	struct btf_struct_meta *meta = meta__ign;
 	u64 size = local_type_id__k;
@@ -1790,7 +1790,7 @@ void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
 	return p;
 }
 
-void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
+__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
 {
 	struct btf_struct_meta *meta = meta__ign;
 	void *p = p__alloc;
@@ -1811,12 +1811,12 @@ static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *head,
 	tail ? list_add_tail(n, h) : list_add(n, h);
 }
 
-void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
+__bpf_kfunc void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
 {
 	return __bpf_list_add(node, head, false);
 }
 
-void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
+__bpf_kfunc void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
 {
 	return __bpf_list_add(node, head, true);
 }
@@ -1834,12 +1834,12 @@ static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
 	return (struct bpf_list_node *)n;
 }
 
-struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
+__bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
 {
 	return __bpf_list_del(head, false);
 }
 
-struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
+__bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
 {
 	return __bpf_list_del(head, true);
 }
@@ -1850,7 +1850,7 @@ struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
 * bpf_task_release().
 * @p: The task on which a reference is being acquired.
 */
-struct task_struct *bpf_task_acquire(struct task_struct *p)
+__bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
 {
 	return get_task_struct(p);
 }
@@ -1861,7 +1861,7 @@ struct task_struct *bpf_task_acquire(struct task_struct *p)
 * released by calling bpf_task_release().
 * @p: The task on which a reference is being acquired.
 */
-struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
+__bpf_kfunc struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
 {
 	/* For the time being this function returns NULL, as it's not currently
 	 * possible to safely acquire a reference to a task with RCU protection
@@ -1913,7 +1913,7 @@ struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
 * be released by calling bpf_task_release().
 * @pp: A pointer to a task kptr on which a reference is being acquired.
 */
-struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
+__bpf_kfunc struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
 {
 	/* We must return NULL here until we have clarity on how to properly
 	 * leverage RCU for ensuring a task's lifetime. See the comment above
@@ -1926,7 +1926,7 @@ struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
 * bpf_task_release - Release the reference acquired on a task.
 * @p: The task on which a reference is being released.
 */
-void bpf_task_release(struct task_struct *p)
+__bpf_kfunc void bpf_task_release(struct task_struct *p)
 {
 	if (!p)
 		return;
@@ -1941,7 +1941,7 @@ void bpf_task_release(struct task_struct *p)
 * calling bpf_cgroup_release().
 * @cgrp: The cgroup on which a reference is being acquired.
 */
-struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
+__bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
 {
 	cgroup_get(cgrp);
 	return cgrp;
@@ -1953,7 +1953,7 @@ struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
 * be released by calling bpf_cgroup_release().
 * @cgrpp: A pointer to a cgroup kptr on which a reference is being acquired.
 */
-struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
+__bpf_kfunc struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
 {
 	struct cgroup *cgrp;
 
@@ -1985,7 +1985,7 @@ struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
 * drops to 0.
 * @cgrp: The cgroup on which a reference is being released.
 */
-void bpf_cgroup_release(struct cgroup *cgrp)
+__bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
 {
 	if (!cgrp)
 		return;
@@ -2000,7 +2000,7 @@ void bpf_cgroup_release(struct cgroup *cgrp)
 * @cgrp: The cgroup for which we're performing a lookup.
 * @level: The level of ancestor to look up.
 */
-struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
+__bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
 {
 	struct cgroup *ancestor;
 
@@ -2019,7 +2019,7 @@ struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
 * stored in a map, or released with bpf_task_release().
 * @pid: The pid of the task being looked up.
 */
-struct task_struct *bpf_task_from_pid(s32 pid)
+__bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
 {
 	struct task_struct *p;
 
@@ -2032,22 +2032,22 @@ struct task_struct *bpf_task_from_pid(s32 pid)
 	return p;
 }
 
-void *bpf_cast_to_kern_ctx(void *obj)
+__bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
 {
 	return obj;
 }
 
-void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
+__bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
 {
 	return obj__ign;
 }
 
-void bpf_rcu_read_lock(void)
+__bpf_kfunc void bpf_rcu_read_lock(void)
 {
 	rcu_read_lock();
 }
 
-void bpf_rcu_read_unlock(void)
+__bpf_kfunc void bpf_rcu_read_unlock(void)
 {
 	rcu_read_unlock();
 }
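
The __bpf_kfunc annotation marks these functions so the compiler and LTO keep them alive and un-mangled for BTF. For reference, a condensed sketch of the registration pattern that pairs with such definitions (the set and function names here are illustrative, not the actual sets in helpers.c):

/* Condensed kfunc definition + registration sketch (names illustrative). */
__bpf_kfunc struct task_struct *my_task_acquire(struct task_struct *p)
{
	return get_task_struct(p);
}

BTF_SET8_START(my_kfunc_ids)
BTF_ID_FLAGS(func, my_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
BTF_SET8_END(my_kfunc_ids)

static const struct btf_kfunc_id_set my_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &my_kfunc_ids,
};

/* Typically called from an initcall:
 *	register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &my_kfunc_set);
 */
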
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 88aae38fde66..0c85e06f7ea7 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -136,7 +136,7 @@ static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
 {
 	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
 	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
-	bpf_map_free_id(&offmap->map, true);
+	bpf_map_free_id(&offmap->map);
 	list_del_init(&offmap->offloads);
 	offmap->netdev = NULL;
 }
diff --git a/kernel/bpf/preload/bpf_preload_kern.c b/kernel/bpf/preload/bpf_preload_kern.c
index 5106b5372f0c..b56f9f3314fd 100644
--- a/kernel/bpf/preload/bpf_preload_kern.c
+++ b/kernel/bpf/preload/bpf_preload_kern.c
@@ -3,7 +3,11 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include "bpf_preload.h"
-#include "iterators/iterators.lskel.h"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#include "iterators/iterators.lskel-little-endian.h"
+#else
+#include "iterators/iterators.lskel-big-endian.h"
+#endif
 
 static struct bpf_link *maps_link, *progs_link;
 static struct iterators_bpf *skel;
diff --git a/kernel/bpf/preload/iterators/Makefile b/kernel/bpf/preload/iterators/Makefile
index 6762b1260f2f..8937dc6bc8d0 100644
--- a/kernel/bpf/preload/iterators/Makefile
+++ b/kernel/bpf/preload/iterators/Makefile
@@ -35,20 +35,22 @@ endif
 
 .PHONY: all clean
 
-all: iterators.lskel.h
+all: iterators.lskel-little-endian.h
+
+big: iterators.lskel-big-endian.h
 
 clean:
 	$(call msg,CLEAN)
 	$(Q)rm -rf $(OUTPUT) iterators
 
-iterators.lskel.h: $(OUTPUT)/iterators.bpf.o | $(BPFTOOL)
+iterators.lskel-%.h: $(OUTPUT)/%/iterators.bpf.o | $(BPFTOOL)
 	$(call msg,GEN-SKEL,$@)
 	$(Q)$(BPFTOOL) gen skeleton -L $< > $@
-
-$(OUTPUT)/iterators.bpf.o: iterators.bpf.c $(BPFOBJ) | $(OUTPUT)
+$(OUTPUT)/%/iterators.bpf.o: iterators.bpf.c $(BPFOBJ) | $(OUTPUT)
 	$(call msg,BPF,$@)
-	$(Q)$(CLANG) -g -O2 -target bpf $(INCLUDES) \
+	$(Q)mkdir -p $(@D)
+	$(Q)$(CLANG) -g -O2 -target bpf -m$* $(INCLUDES) \
 		 -c $(filter %.c,$^) -o $@ && \
 	$(LLVM_STRIP) -g $@
diff --git a/kernel/bpf/preload/iterators/README b/kernel/bpf/preload/iterators/README
index 7fd6d39a9ad2..98e7c90ea012 100644
--- a/kernel/bpf/preload/iterators/README
+++ b/kernel/bpf/preload/iterators/README
@@ -1,4 +1,7 @@
 WARNING:
-If you change "iterators.bpf.c" do "make -j" in this directory to rebuild "iterators.skel.h".
+If you change "iterators.bpf.c" do "make -j" in this directory to
+rebuild "iterators.lskel-little-endian.h". Then, on a big-endian
+machine, do "make -j big" in this directory to rebuild
+"iterators.lskel-big-endian.h". Commit both resulting headers.
 Make sure to have clang 10 installed. See
 Documentation/bpf/bpf_devel_QA.rst
diff --git a/kernel/bpf/preload/iterators/iterators.lskel-big-endian.h b/kernel/bpf/preload/iterators/iterators.lskel-big-endian.h
new file mode 100644
index 000000000000..ebdc6c0cdb70
--- /dev/null
+++ b/kernel/bpf/preload/iterators/iterators.lskel-big-endian.h
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */
+#ifndef __ITERATORS_BPF_SKEL_H__
+#define __ITERATORS_BPF_SKEL_H__
+
+#include <bpf/skel_internal.h>
+
+struct iterators_bpf {
+	struct bpf_loader_ctx ctx;
+	struct {
+		struct bpf_map_desc rodata;
+	} maps;
+	struct {
+		struct bpf_prog_desc dump_bpf_map;
+		struct bpf_prog_desc dump_bpf_prog;
+	} progs;
+	struct {
+		int dump_bpf_map_fd;
+		int dump_bpf_prog_fd;
+	} links;
+};
+
+static inline int
+iterators_bpf__dump_bpf_map__attach(struct iterators_bpf *skel)
+{
+	int prog_fd = skel->progs.dump_bpf_map.prog_fd;
+	int fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);
+
+	if (fd > 0)
+		skel->links.dump_bpf_map_fd = fd;
+	return fd;
+}
+
+static inline int
+iterators_bpf__dump_bpf_prog__attach(struct iterators_bpf *skel)
+{
+	int prog_fd = skel->progs.dump_bpf_prog.prog_fd;
+	int fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);
+
+	if (fd > 0)
+		skel->links.dump_bpf_prog_fd = fd;
+	return fd;
+}
+
+static inline int
+iterators_bpf__attach(struct iterators_bpf *skel)
+{
+	int ret = 0;
+
+	ret = ret < 0 ? ret : iterators_bpf__dump_bpf_map__attach(skel);
+	ret = ret < 0 ? ret : iterators_bpf__dump_bpf_prog__attach(skel);
+	return ret < 0 ? ret : 0;
+}
+
+static inline void
+iterators_bpf__detach(struct iterators_bpf *skel)
+{
+	skel_closenz(skel->links.dump_bpf_map_fd);
+	skel_closenz(skel->links.dump_bpf_prog_fd);
+}
+static void
+iterators_bpf__destroy(struct iterators_bpf *skel)
+{
+	if (!skel)
+		return;
+	iterators_bpf__detach(skel);
+	skel_closenz(skel->progs.dump_bpf_map.prog_fd);
+	skel_closenz(skel->progs.dump_bpf_prog.prog_fd);
+	skel_closenz(skel->maps.rodata.map_fd);
+	skel_free(skel);
+}
+static inline struct iterators_bpf *
+iterators_bpf__open(void)
+{
+	struct iterators_bpf *skel;
+
+	skel = skel_alloc(sizeof(*skel));
+	if (!skel)
+		goto cleanup;
+	skel->ctx.sz = (void *)&skel->links - (void *)skel;
+	return skel;
+cleanup:
+	iterators_bpf__destroy(skel);
+	return NULL;
+}
+
+static inline int
+iterators_bpf__load(struct iterators_bpf *skel)
+{
+	struct bpf_load_and_run_opts opts = {};
+	int err;
+
+	opts.ctx = (struct bpf_loader_ctx *)skel;
+	opts.data_sz = 6008;
+	opts.data = (void *)"\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xeb\x9f\x01\0\
+\0\0\0\x18\0\0\0\0\0\0\x04\x1c\0\0\x04\x1c\0\0\x05\x18\0\0\0\0\x02\0\0\0\0\0\0\
+\x02\0\0\0\x01\x04\0\0\x02\0\0\0\x10\0\0\0\x13\0\0\0\x03\0\0\0\0\0\0\0\x18\0\0\
+\0\x04\0\0\0\x40\0\0\0\0\x02\0\0\0\0\0\0\x08\0\0\0\0\x02\0\0\0\0\0\0\x0d\0\0\0\
+\0\x0d\0\0\x01\0\0\0\x06\0\0\0\x1c\0\0\0\x01\0\0\0\x20\x01\0\0\0\0\0\0\x04\x01\
+\0\0\x20\0\0\0\x24\x0c\0\0\x01\0\0\0\x05\0\0\0\xc2\x04\0\0\x03\0\0\0\x18\0\0\0\
+\xd0\0\0\0\x09\0\0\0\0\0\0\0\xd4\0\0\0\x0b\0\0\0\x40\0\0\0\xdf\0\0\0\x0b\0\0\0\
+\x80\0\0\0\0\x02\0\0\0\0\0\0\x0a\0\0\0\xe7\x07\0\0\0\0\0\0\0\0\0\0\xf0\x08\0\0\
+\0\0\0\0\x0c\0\0\0\xf6\x01\0\0\0\0\0\0\x08\0\0\0\x40\0\0\x01\xb3\x04\0\0\x03\0\
+\0\0\x18\0\0\x01\xbb\0\0\0\x0e\0\0\0\0\0\0\x01\xbe\0\0\0\x11\0\0\0\x20\0\0\x01\
+\xc3\0\0\0\x0e\0\0\0\xa0\0\0\x01\xcf\x08\0\0\0\0\0\0\x0f\0\0\x01\xd5\x01\0\0\0\
+\0\0\0\x04\0\0\0\x20\0\0\x01\xe2\x01\0\0\0\0\0\0\x01\x01\0\0\x08\0\0\0\0\x03\0\
+\0\0\0\0\0\0\0\0\0\x10\0\0\0\x12\0\0\0\x10\0\0\x01\xe7\x01\0\0\0\0\0\0\x04\0\0\
+\0\x20\0\0\0\0\x02\0\0\0\0\0\0\x14\0\0\x02\x4b\x04\0\0\x02\0\0\0\x10\0\0\0\x13\
+\0\0\0\x03\0\0\0\0\0\0\x02\x5e\0\0\0\x15\0\0\0\x40\0\0\0\0\x02\0\0\0\0\0\0\x18\
+\0\0\0\0\x0d\0\0\x01\0\0\0\x06\0\0\0\x1c\0\0\0\x13\0\0\x02\x63\x0c\0\0\x01\0\0\
+\0\x16\0\0\x02\xaf\x04\0\0\x01\0\0\0\x08\0\0\x02\xb8\0\0\0\x19\0\0\0\0\0\0\0\0\
+\x02\0\0\0\0\0\0\x1a\0\0\x03\x09\x04\0\0\x06\0\0\0\x38\0\0\x01\xbb\0\0\0\x0e\0\
+\0\0\0\0\0\x01\xbe\0\0\0\x11\0\0\0\x20\0\0\x03\x16\0\0\0\x1b\0\0\0\xc0\0\0\x03\
+\x27\0\0\0\x15\0\0\x01\0\0\0\x03\x30\0\0\0\x1d\0\0\x01\x40\0\0\x03\x3a\0\0\0\
+\x1e\0\0\x01\x80\0\0\0\0\x02\0\0\0\0\0\0\x1c\0\0\0\0\x0a\0\0\0\0\0\0\x10\0\0\0\
+\0\x02\0\0\0\0\0\0\x1f\0\0\0\0\x02\0\0\0\0\0\0\x20\0\0\x03\x84\x04\0\0\x02\0\0\
+\0\x08\0\0\x03\x92\0\0\0\x0e\0\0\0\0\0\0\x03\x9b\0\0\0\x0e\0\0\0\x20\0\0\x03\
+\x3a\x04\0\0\x03\0\0\0\x18\0\0\x03\xa5\0\0\0\x1b\0\0\0\0\0\0\x03\xad\0\0\0\x21\
+\0\0\0\x40\0\0\x03\xb3\0\0\0\x23\0\0\0\x80\0\0\0\0\x02\0\0\0\0\0\0\x22\0\0\0\0\
+\x02\0\0\0\0\0\0\x24\0\0\x03\xb7\x04\0\0\x01\0\0\0\x04\0\0\x03\xc2\0\0\0\x0e\0\
+\0\0\0\0\0\x04\x2b\x04\0\0\x01\0\0\0\x04\0\0\x04\x34\0\0\0\x0e\0\0\0\0\0\0\0\0\
+\x03\0\0\0\0\0\0\0\0\0\0\x1c\0\0\0\x12\0\0\0\x23\0\0\x04\xaa\x0e\0\0\0\0\0\0\
+\x25\0\0\0\0\0\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\x1c\0\0\0\x12\0\0\0\x0e\0\0\x04\
+\xbe\x0e\0\0\0\0\0\0\x27\0\0\0\0\0\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\x1c\0\0\0\x12\
+\0\0\0\x20\0\0\x04\xd4\x0e\0\0\0\0\0\0\x29\0\0\0\0\0\0\0\0\x03\0\0\0\0\0\0\0\0\
+\0\0\x1c\0\0\0\x12\0\0\0\x11\0\0\x04\xe9\x0e\0\0\0\0\0\0\x2b\0\0\0\0\0\0\0\0\
+\x03\0\0\0\0\0\0\0\0\0\0\x10\0\0\0\x12\0\0\0\x04\0\0\x05\0\x0e\0\0\0\0\0\0\x2d\
+\0\0\0\x01\0\0\x05\x08\x0f\0\0\x04\0\0\0\x62\0\0\0\x26\0\0\0\0\0\0\0\x23\0\0\0\
+\x28\0\0\0\x23\0\0\0\x0e\0\0\0\x2a\0\0\0\x31\0\0\0\x20\0\0\0\x2c\0\0\0\x51\0\0\
+\0\x11\0\0\x05\x10\x0f\0\0\x01\0\0\0\x04\0\0\0\x2e\0\0\0\0\0\0\0\x04\0\x62\x70\
+\x66\x5f\x69\x74\x65\x72\x5f\x5f\x62\x70\x66\x5f\x6d\x61\x70\0\x6d\x65\x74\x61\
+\0\x6d\x61\x70\0\x63\x74\x78\0\x69\x6e\x74\0\x64\x75\x6d\x70\x5f\x62\x70\x66\
+\x5f\x6d\x61\x70\0\x69\x74\x65\x72\x2f\x62\x70\x66\x5f\x6d\x61\x70\0\x30\x3a\
+\x30\0\x2f\x68\x6f\x6d\x65\x2f\x69\x69\x69\x2f\x6c\x69\x6e\x75\x78\x2d\x6b\x65\
+\x72\x6e\x65\x6c\x2d\x74\x6f\x6f\x6c\x63\x68\x61\x69\x6e\x2f\x73\x72\x63\x2f\
+\x6c\x69\x6e\x75\x78\x2f\x6b\x65\x72\x6e\x65\x6c\x2f\x62\x70\x66\x2f\x70\x72\
+\x65\x6c\x6f\x61\x64\x2f\x69\x74\x65\x72\x61\x74\x6f\x72\x73\x2f\x69\x74\x65\
+\x72\x61\x74\x6f\x72\x73\x2e\x62\x70\x66\x2e\x63\0\x09\x73\x74\x72\x75\x63\x74\
+\x20\x73\x65\x71\x5f\x66\x69\x6c\x65\x20\x2a\x73\x65\x71\x20\x3d\x20\x63\x74\
+\x78\x2d\x3e\x6d\x65\x74\x61\x2d\x3e\x73\x65\x71\x3b\0\x62\x70\x66\x5f\x69\x74\
+\x65\x72\x5f\x6d\x65\x74\x61\0\x73\x65\x71\0\x73\x65\x73\x73\x69\x6f\x6e\x5f\
+\x69\x64\0\x73\x65\x71\x5f\x6e\x75\x6d\0\x73\x65\x71\x5f\x66\x69\x6c\x65\0\x5f\
+\x5f\x75\x36\x34\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x6c\x6f\x6e\x67\x20\x6c\
+\x6f\x6e\x67\0\x30\x3a\x31\0\x09\x73\x74\x72\x75\x63\x74\x20\x62\x70\x66\x5f\
+\x6d\x61\x70\x20\x2a\x6d\x61\x70\x20\x3d\x20\x63\x74\x78\x2d\x3e\x6d\x61\x70\
+\x3b\0\x09\x69\x66\x20\x28\x21\x6d\x61\x70\x29\0\x30\x3a\x32\0\x09\x5f\x5f\x75\
+\x36\x34\x20\x73\x65\x71\x5f\x6e\x75\x6d\x20\x3d\x20\x63\x74\x78\x2d\x3e\x6d\
+\x65\x74\x61\x2d\x3e\x73\x65\x71\x5f\x6e\x75\x6d\x3b\0\x09\x69\x66\x20\x28\x73\
+\x65\x71\x5f\x6e\x75\x6d\x20\x3d\x3d\x20\x30\x29\0\x09\x09\x42\x50\x46\x5f\x53\
+\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\x65\x71\x2c\x20\x22\x20\x20\x69\
+\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\x5c\x6e\x22\x29\x3b\0\x62\x70\x66\
+\x5f\x6d\x61\x70\0\x69\x64\0\x6e\x61\x6d\x65\0\x6d\x61\x78\x5f\x65\x6e\x74\x72\
+\x69\x65\x73\0\x5f\x5f\x75\x33\x32\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x69\
+\x6e\x74\0\x63\x68\x61\x72\0\x5f\x5f\x41\x52\x52\x41\x59\x5f\x53\x49\x5a\x45\
+\x5f\x54\x59\x50\x45\x5f\x5f\0\x09\x42\x50\x46\x5f\x53\x45\x51\x5f\x50\x52\x49\
+\x4e\x54\x46\x28\x73\x65\x71\x2c\x20\x22\x25\x34\x75\x20\x25\x2d\x31\x36\x73\
+\x25\x36\x64\x5c\x6e\x22\x2c\x20\x6d\x61\x70\x2d\x3e\x69\x64\x2c\x20\x6d\x61\
+\x70\x2d\x3e\x6e\x61\x6d\x65\x2c\x20\x6d\x61\x70\x2d\x3e\x6d\x61\x78\x5f\x65\
+\x6e\x74\x72\x69\x65\x73\x29\x3b\0\x7d\0\x62\x70\x66\x5f\x69\x74\x65\x72\x5f\
+\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x70\x72\x6f\x67\0\x64\x75\x6d\x70\x5f\
+\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x69\x74\x65\x72\x2f\x62\x70\x66\x5f\x70\x72\
+\x6f\x67\0\x09\x73\x74\x72\x75\x63\x74\x20\x62\x70\x66\x5f\x70\x72\x6f\x67\x20\
+\x2a\x70\x72\x6f\x67\x20\x3d\x20\x63\x74\x78\x2d\x3e\x70\x72\x6f\x67\x3b\0\x09\
+\x69\x66\x20\x28\x21\x70\x72\x6f\x67\x29\0\x62\x70\x66\x5f\x70\x72\x6f\x67\0\
+\x61\x75\x78\0\x09\x61\x75\x78\x20\x3d\x20\x70\x72\x6f\x67\x2d\x3e\x61\x75\x78\
+\x3b\0\x09\x09\x42\x50\x46\x5f\x53\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\
+\x65\x71\x2c\x20\x22\x20\x20\x69\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x20\x20\x20\x20\x61\x74\x74\x61\x63\x68\x65\x64\x5c\x6e\x22\
+\x29\x3b\0\x62\x70\x66\x5f\x70\x72\x6f\x67\x5f\x61\x75\x78\0\x61\x74\x74\x61\
+\x63\x68\x5f\x66\x75\x6e\x63\x5f\x6e\x61\x6d\x65\0\x64\x73\x74\x5f\x70\x72\x6f\
+\x67\0\x66\x75\x6e\x63\x5f\x69\x6e\x66\x6f\0\x62\x74\x66\0\x09\x42\x50\x46\x5f\
+\x53\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\x65\x71\x2c\x20\x22\x25\x34\
+\x75\x20\x25\x2d\x31\x36\x73\x20\x25\x73\x20\x25\x73\x5c\x6e\x22\x2c\x20\x61\
+\x75\x78\x2d\x3e\x69\x64\x2c\0\x30\x3a\x34\0\x30\x3a\x35\0\x09\x69\x66\x20\x28\
+\x21\x62\x74\x66\x29\0\x62\x70\x66\x5f\x66\x75\x6e\x63\x5f\x69\x6e\x66\x6f\0\
+\x69\x6e\x73\x6e\x5f\x6f\x66\x66\0\x74\x79\x70\x65\x5f\x69\x64\0\x30\0\x73\x74\
+\x72\x69\x6e\x67\x73\0\x74\x79\x70\x65\x73\0\x68\x64\x72\0\x62\x74\x66\x5f\x68\
+\x65\x61\x64\x65\x72\0\x73\x74\x72\x5f\x6c\x65\x6e\0\x09\x74\x79\x70\x65\x73\
+\x20\x3d\x20\x62\x74\x66\x2d\x3e\x74\x79\x70\x65\x73\x3b\0\x09\x62\x70\x66\x5f\
+\x70\x72\x6f\x62\x65\x5f\x72\x65\x61\x64\x5f\x6b\x65\x72\x6e\x65\x6c\x28\x26\
+\x74\x2c\x20\x73\x69\x7a\x65\x6f\x66\x28\x74\x29\x2c\x20\x74\x79\x70\x65\x73\
+\x20\x2b\x20\x62\x74\x66\x5f\x69\x64\x29\x3b\0\x09\x73\x74\x72\x20\x3d\x20\x62\
+\x74\x66\x2d\x3e\x73\x74\x72\x69\x6e\x67\x73\x3b\0\x62\x74\x66\x5f\x74\x79\x70\
+\x65\0\x6e\x61\x6d\x65\x5f\x6f\x66\x66\0\x09\x6e\x61\x6d\x65\x5f\x6f\x66\x66\
+\x20\x3d\x20\x42\x50\x46\x5f\x43\x4f\x52\x45\x5f\x52\x45\x41\x44\x28\x74\x2c\
+\x20\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x29\x3b\0\x30\x3a\x32\x3a\x30\0\x09\x69\
+\x66\x20\x28\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x20\x3e\x3d\x20\x62\x74\x66\x2d\
+\x3e\x68\x64\x72\x2e\x73\x74\x72\x5f\x6c\x65\x6e\x29\0\x09\x72\x65\x74\x75\x72\
+\x6e\x20\x73\x74\x72\x20\x2b\x20\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x3b\0\x30\x3a\
+\x33\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x6d\x61\x70\x2e\x5f\x5f\x5f\x66\x6d\
+\x74\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x6d\x61\x70\x2e\x5f\x5f\x5f\x66\x6d\
+\x74\x2e\x31\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\x2e\x5f\x5f\
+\x5f\x66\x6d\x74\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\x2e\x5f\
+\x5f\x5f\x66\x6d\x74\x2e\x32\0\x4c\x49\x43\x45\x4e\x53\x45\0\x2e\x72\x6f\x64\
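
The generated light skeleton exposes the usual open/load/attach lifecycle. A minimal sketch of a caller, using only the functions defined in this header; the function name is hypothetical and error handling is condensed:

/* Hypothetical caller of the generated light skeleton. */
static int preload_iterators(void)
{
	struct iterators_bpf *skel;
	int err;

	skel = iterators_bpf__open();
	if (!skel)
		return -ENOMEM;

	err = iterators_bpf__load(skel);
	if (!err)
		err = iterators_bpf__attach(skel);
	if (err)
		iterators_bpf__destroy(skel);
	return err;
}
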
