author    Alexei Starovoitov <ast@kernel.org>    2020-11-19 10:45:55 -0800
committer Alexei Starovoitov <ast@kernel.org>    2020-11-19 11:58:15 -0800
commit    14d6d86c210aea1a83c19a8f6391ecabcbefed94 (patch)
tree      5eadde2770ff55b99a77d39e56e9b3dc322ce628 /lib/strncpy_from_user.c
parent    1fd6cee127e2ddff36d648573d7566aafb0d0b77 (diff)
parent    c8a36aedf3e24768e94d87fdcdd37684bd241c44 (diff)
Merge branch 'Fix bpf_probe_read_user_str() overcopying'
Daniel Xu says:

====================
6ae08ae3dea2 ("bpf: Add probe_read_{user, kernel} and
probe_read_{user, kernel}_str helpers") introduced a subtle bug where
bpf_probe_read_user_str() would potentially copy a few extra bytes after
the NUL terminator. This issue is particularly nefarious when strings
are used as map keys, as seemingly identical strings can occupy multiple
entries in a map.

This patchset fixes the issue and introduces a selftest to prevent
future regressions.

v6 -> v7:
* Add comments

v5 -> v6:
* zero-pad up to sizeof(unsigned long) after NUL

v4 -> v5:
* don't read potentially uninitialized memory

v3 -> v4:
* directly pass userspace pointer to prog
* test more strings of different length

v2 -> v3:
* set pid filter before attaching prog in selftest
* use long instead of int as bpf_probe_read_user_str() retval
* style changes

v1 -> v2:
* add Fixes: tag
* add selftest
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
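To make the failure mode concrete, here is a minimal userspace sketch
(not part of the patchset): BPF compares map keys as fixed-size byte
blobs, so any bytes after the NUL terminator participate in the
comparison. The key size and trailing byte values below are made up for
illustration.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Two keys holding the same C string, "abc"... */
	char key_a[8] = { 'a', 'b', 'c', '\0', 0x00, 0x00, 0x00, 0x00 };
	/* ...but with leftover garbage after the NUL terminator. */
	char key_b[8] = { 'a', 'b', 'c', '\0', 0x41, 0x42, 0x43, 0x44 };

	/* As C strings they are identical: prints 0. */
	printf("strcmp: %d\n", strcmp(key_a, key_b));

	/*
	 * As fixed-size map keys they differ: prints nonzero, so a
	 * string-keyed BPF map would hold two entries for "abc".
	 */
	printf("memcmp: %d\n", memcmp(key_a, key_b, sizeof(key_a)));
	return 0;
}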
Diffstat (limited to 'lib/strncpy_from_user.c')
-rw-r--r--	lib/strncpy_from_user.c	19
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index e6d5fcc2cdf3..122d8d0e253c 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -35,17 +35,32 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src,
 		goto byte_at_a_time;
 
 	while (max >= sizeof(unsigned long)) {
-		unsigned long c, data;
+		unsigned long c, data, mask;
 
 		/* Fall back to byte-at-a-time if we get a page fault */
 		unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
 
-		*(unsigned long *)(dst+res) = c;
+		/*
+		 * Note that we mask out the bytes following the NUL. This is
+		 * important to do because string oblivious code may read past
+		 * the NUL. For those routines, we don't want to give them
+		 * potentially random bytes after the NUL in `src`.
+		 *
+		 * One example of such code is BPF map keys. BPF treats map keys
+		 * as an opaque set of bytes. Without the post-NUL mask, any BPF
+		 * maps keyed by strings returned from strncpy_from_user() may
+		 * have multiple entries for semantically identical strings.
+		 */
 		if (has_zero(c, &data, &constants)) {
 			data = prep_zero_mask(c, data, &constants);
 			data = create_zero_mask(data);
+			mask = zero_bytemask(data);
+			*(unsigned long *)(dst+res) = c & mask;
 			return res + find_zero(data);
 		}
+
+		*(unsigned long *)(dst+res) = c;
+
 		res += sizeof(unsigned long);
 		max -= sizeof(unsigned long);
 	}
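For readers unfamiliar with the word-at-a-time helpers, the following is
a simplified userspace model of the masking step above, assuming a
64-bit little-endian machine. The has-zero bit trick and the derived
byte mask are stand-ins for the kernel's has_zero() / create_zero_mask()
/ zero_bytemask() from <asm/word-at-a-time.h>, not the real
implementations.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONES  0x0101010101010101ULL
#define HIGHS 0x8080808080808080ULL

int main(void)
{
	/* One word read from userspace: "hi\0" followed by garbage. */
	unsigned char src[8] = { 'h', 'i', '\0', 0xde, 0xad, 0xbe, 0xef, 0x99 };
	uint64_t c;
	memcpy(&c, src, sizeof(c));

	/*
	 * Classic bit trick: sets the high bit of each byte that is
	 * zero. Only the lowest set bit is reliable (bytes above the
	 * first NUL may be misreported due to borrow propagation),
	 * which is all the code above needs.
	 */
	uint64_t bits = (c - ONES) & ~c & HIGHS;
	if (bits) {
		/*
		 * Keep every bit up to and including the lowest set
		 * bit; this zeroes all bytes past the first NUL (the
		 * NUL byte itself is zero either way).
		 */
		uint64_t mask = bits ^ (bits - 1);

		printf("before: %016llx\n", (unsigned long long)c);
		printf("after:  %016llx\n", (unsigned long long)(c & mask));
	}
	return 0;
}

On this input the masked word keeps only 'h', 'i', and the NUL
(0x0000000000006968), so the garbage bytes never reach the destination
buffer, which is exactly what `c & mask` achieves in the patched loop.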