// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/
* Copyright (C) 2021, Alibaba Cloud
*/
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/dax.h>
#include <linux/exportfs.h>
#include "xattr.h"
#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>
static struct kmem_cache *erofs_inode_cachep __read_mostly;
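
/*
 * Message helpers behind the erofs_err()/erofs_info() wrappers: format the
 * message with %pV and prefix it with the backing device name (and, for
 * errors, the name of the calling function).
 */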
void _erofs_err(struct super_block *sb, const char *function,
		const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
	va_end(args);
}

void _erofs_info(struct super_block *sb, const char *function,
		 const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_info("(device %s): %pV", sb->s_id, &vaf);
	va_end(args);
}
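
/*
 * Check the on-disk superblock checksum: crc32c over the block that holds
 * the superblock, starting at EROFS_SUPER_OFFSET, computed with the
 * checksum field itself zeroed out.
 */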
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
	struct erofs_super_block *dsb;
	u32 expected_crc, crc;

	dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
		      EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
	if (!dsb)
		return -ENOMEM;

	expected_crc = le32_to_cpu(dsb->checksum);
	dsb->checksum = 0;
	/*
	 * The checksum only covers data from EROFS_SUPER_OFFSET onwards,
	 * which leaves room for x86 boot sectors and other oddities.
	 */
	crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
	kfree(dsb);

	if (crc != expected_crc) {
		erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
			  crc, expected_crc);
		return -EBADMSG;
	}
	return 0;
}
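
/* slab constructor: initialize the embedded VFS inode once per slab object */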
static void erofs_inode_init_once(void *ptr)
{
	struct erofs_inode *vi = ptr;

	inode_init_once(&vi->vfs_inode);
}

static struct inode *erofs_alloc_inode(struct super_block *sb)
{
	struct erofs_inode *vi =
		alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);

	if (!vi)
		return NULL;

	/* zero out everything except vfs_inode */
	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
	return &vi->vfs_inode;
}
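
/*
 * Free per-inode resources: the fast symlink target (i_link points to a
 * separately allocated buffer), the shared xattr array and the inode itself.
 */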
static void erofs_free_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);

	/* be careful of RCU symlink path */
	if (inode->i_op == &erofs_fast_symlink_iops)
		kfree(inode->i_link);
	kfree(vi->xattr_shared_xattrs);

	kmem_cache_free(erofs_inode_cachep, vi);
}
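
/*
 * Make sure the image does not use incompatible on-disk features that this
 * kernel does not implement; unknown feature_incompat bits are fatal.
 */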
static bool check_layout_compatibility(struct super_block *sb,
				       struct erofs_super_block *dsb)
{
	const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

	EROFS_SB(sb)->feature_incompat = feature;

	/* check if current kernel meets all mandatory requirements */
	if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
		erofs_err(sb,
			  "unidentified incompatible feature %x, please upgrade kernel version",
			  feature & ~EROFS_ALL_FEATURE_INCOMPAT);
		return false;
	}
	return true;
}
#ifdef CONFIG_EROFS_FS_ZIP
/* read variable-sized metadata, offset will be aligned to a 4-byte boundary */
static void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
				 erofs_off_t *offset, int *lengthp)
{
	u8 *buffer, *ptr;
	int len, i, cnt;

	*offset = round_up(*offset, 4);
	ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*offset), EROFS_KMAP);
	if (IS_ERR(ptr))
		return ptr;

	/* the payload is preceded by its 16-bit length; 0 means 64KiB */
	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(*offset)]);
	if (!len)
		len = U16_MAX + 1;

	buffer = kmalloc(len, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	*offset += sizeof(__le16);
	*lengthp = len;
for (i