f2fs-tools: sload.f2fs compression support

commit b585244e72
category: bugfix
issue: #I6VAS0
CVE: NA

Signed-off-by: DongSenhao <dongsenhao2@huawei.com>
---------------------------------------

Add F2FS compression support for sload.f2fs:
* Support a file-extension filter, with either a default-accept or
  default-deny policy
* Support choice of compression algorithm: LZO (version 2) or LZ4
  (default)
* Support a custom log of cluster size
* Support a minimum number of compressed blocks per cluster (default 1).
  A cluster is left uncompressed if this minimum cannot be met.
* Support the -r (read-only) option.
  This releases compressed blocks to secure free space in advance. Note
  that all compressed files will then carry the immutable bit.
* Update the sload.f2fs man page
* Remove unnecessary qbuf allocation (Jaegeuk, suggested by Satya)

Signed-off-by: Robin Hsu <robinhsu@google.com>
[Jaegeuk Kim: fix some bugs and refactor names]
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Signed-off-by: dongsenhao <dongsenhao2@huawei.com>

 create mode 100644 fsck/compress.c
 create mode 100644 fsck/compress.h

Robin Hsu 2020-12-08 16:15:54 +08:00 committed by dongsenhao
parent a1879e5af8
commit ecdcd6189d
10 changed files with 728 additions and 27 deletions

configure.ac

@ -52,6 +52,18 @@ AC_PATH_PROG([LDCONFIG], [ldconfig],
[$PATH:/sbin])
# Checks for libraries.
AC_CHECK_LIB([lzo2], [main],
[AC_SUBST([liblzo2_LIBS], ["-llzo2"])
AC_DEFINE([HAVE_LIBLZO2], [1],
[Define if you have liblzo2])
], [], [])
AC_CHECK_LIB([lz4], [main],
[AC_SUBST([liblz4_LIBS], ["-llz4"])
AC_DEFINE([HAVE_LIBLZ4], [1],
[Define if you have liblz4])
], [], [])
PKG_CHECK_MODULES([libuuid], [uuid])
AS_IF([test "x$with_selinux" != "xno"],

fsck/Makefile.am

@ -3,12 +3,15 @@
AM_CPPFLAGS = ${libuuid_CFLAGS} -I$(top_srcdir)/include
AM_CFLAGS = -Wall
sbin_PROGRAMS = fsck.f2fs
noinst_HEADERS = common.h dict.h dqblk_v2.h f2fs.h fsck.h node.h quotaio.h quotaio_tree.h quotaio_v2.h xattr.h
noinst_HEADERS = common.h dict.h dqblk_v2.h f2fs.h fsck.h node.h quotaio.h \
quotaio_tree.h quotaio_v2.h xattr.h compress.h
include_HEADERS = $(top_srcdir)/include/quota.h
fsck_f2fs_SOURCES = main.c fsck.c dump.c mount.c defrag.c resize.c \
node.c segment.c dir.c sload.c xattr.c \
node.c segment.c dir.c sload.c xattr.c compress.c \
dict.c mkquota.c quotaio.c quotaio_tree.c quotaio_v2.c
fsck_f2fs_LDADD = ${libselinux_LIBS} ${libuuid_LIBS} $(top_builddir)/lib/libf2fs.la
fsck_f2fs_LDADD = ${libselinux_LIBS} ${libuuid_LIBS} \
${liblzo2_LIBS} ${liblz4_LIBS} \
$(top_builddir)/lib/libf2fs.la
install-data-hook:
ln -sf fsck.f2fs $(DESTDIR)/$(sbindir)/dump.f2fs

fsck/compress.c (new file, 178 lines)

@ -0,0 +1,178 @@
/**
* compress.c
*
* Copyright (c) 2020 Google Inc.
* Robin Hsu <robinhsu@google.com>
* : add sload compression support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* for config.h for general environment (non-Android) */
#include "f2fs.h"
#include "compress.h"
#ifdef HAVE_LIBLZO2
#include <lzo/lzo1x.h> /* for lzo1x_1_15_compress() */
#endif
#ifdef HAVE_LIBLZ4
#include <lz4.h> /* for LZ4_compress_fast_extState() */
#endif
/*
* macro/constants borrowed from kernel header (GPL-2.0):
* include/linux/lzo.h, and include/linux/lz4.h
*/
#ifdef HAVE_LIBLZO2
#define lzo1x_worst_compress(x) ((x) + (x) / 16 + 64 + 3 + 2)
#define LZO_WORK_SIZE ALIGN_UP(LZO1X_1_15_MEM_COMPRESS, 8)
#endif
#ifdef HAVE_LIBLZ4
#define LZ4_MEMORY_USAGE 14
#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
#ifndef LZ4_STREAMSIZE
#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(long long))
#endif
#define LZ4_MEM_COMPRESS LZ4_STREAMSIZE
#define LZ4_ACCELERATION_DEFAULT 1
#define LZ4_WORK_SIZE ALIGN_UP(LZ4_MEM_COMPRESS, 8)
#endif
#if defined(HAVE_LIBLZO2) || defined(HAVE_LIBLZ4)
static void reset_cc(struct compress_ctx *cc)
{
memset(cc->rbuf, 0, cc->cluster_size * F2FS_BLKSIZE);
memset(cc->cbuf->cdata, 0, cc->cluster_size * F2FS_BLKSIZE
- F2FS_BLKSIZE);
}
#endif
#ifdef HAVE_LIBLZO2
static void lzo_compress_init(struct compress_ctx *cc)
{
size_t size = cc->cluster_size * F2FS_BLKSIZE;
size_t alloc = size + lzo1x_worst_compress(size)
+ COMPRESS_HEADER_SIZE + LZO_WORK_SIZE;
cc->private = malloc(alloc);
ASSERT(cc->private);
cc->rbuf = (char *) cc->private + LZO_WORK_SIZE;
cc->cbuf = (struct compress_data *)((char *) cc->rbuf + size);
}
static int lzo_compress(struct compress_ctx *cc)
{
int ret = lzo1x_1_15_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
(lzo_uintp)(&cc->clen), cc->private);
cc->cbuf->clen = cpu_to_le32(cc->clen);
return ret;
}
#endif
#ifdef HAVE_LIBLZ4
static void lz4_compress_init(struct compress_ctx *cc)
{
size_t size = cc->cluster_size * F2FS_BLKSIZE;
size_t alloc = size + LZ4_COMPRESSBOUND(size)
+ COMPRESS_HEADER_SIZE + LZ4_WORK_SIZE;
cc->private = malloc(alloc);
ASSERT(cc->private);
cc->rbuf = (char *) cc->private + LZ4_WORK_SIZE;
cc->cbuf = (struct compress_data *)((char *) cc->rbuf + size);
}
static int lz4_compress(struct compress_ctx *cc)
{
cc->clen = LZ4_compress_fast_extState(cc->private, cc->rbuf,
(char *)cc->cbuf->cdata, cc->rlen,
cc->rlen - F2FS_BLKSIZE * c.compress.min_blocks,
LZ4_ACCELERATION_DEFAULT);
if (!cc->clen)
return 1;
cc->cbuf->clen = cpu_to_le32(cc->clen);
return 0;
}
#endif
const char *supported_comp_names[] = {
"lzo",
"lz4",
"",
};
compress_ops supported_comp_ops[] = {
#ifdef HAVE_LIBLZO2
{lzo_compress_init, lzo_compress, reset_cc},
#else
{NULL, NULL, NULL},
#endif
#ifdef HAVE_LIBLZ4
{lz4_compress_init, lz4_compress, reset_cc},
#else
{NULL, NULL, NULL},
#endif
};
/* linked list */
typedef struct _ext_t {
const char *ext;
struct _ext_t *next;
} ext_t;
static ext_t *extension_list;
static bool ext_found(const char *ext)
{
ext_t *p = extension_list;
while (p != NULL && strcmp(ext, p->ext))
p = p->next;
return (p != NULL);
}
static const char *get_ext(const char *path)
{
char *p = strrchr(path, '.');
return p == NULL ? path + strlen(path) : p + 1;
}
static bool ext_do_filter(const char *path)
{
return (ext_found(get_ext(path)) == true) ^
(c.compress.filter == COMPR_FILTER_ALLOW);
}
static void ext_filter_add(const char *ext)
{
ext_t *node;
ASSERT(ext != NULL);
if (ext_found(ext))
return; /* ext was already registered */
node = malloc(sizeof(ext_t));
ASSERT(node != NULL);
node->ext = ext;
node->next = extension_list;
extension_list = node;
}
static void ext_filter_destroy(void)
{
ext_t *p;
while (extension_list != NULL) {
p = extension_list;
extension_list = p->next;
free(p);
}
}
filter_ops ext_filter = {
.add = ext_filter_add,
.destroy = ext_filter_destroy,
.filter = ext_do_filter,
};
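
For illustration only (not part of the diff; the demo_* names below are
hypothetical), the allow/deny semantics implemented by ext_do_filter() above
reduce to a small standalone sketch: with -x (COMPR_FILTER_ALLOW) the listed
extensions are excluded from compression, while with -i (COMPR_FILTER_DENY)
only the listed extensions are compressed.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    enum demo_policy { DEMO_ALLOW /* -x */, DEMO_DENY /* -i */ };

    /* mirrors get_ext(): the extension starts after the last '.' */
    static const char *demo_get_ext(const char *path)
    {
        const char *p = strrchr(path, '.');
        return p == NULL ? path + strlen(path) : p + 1;
    }

    /* same XOR trick as ext_do_filter(): returns true when the file
     * should be compressed under the given policy */
    static bool demo_should_compress(const char *path, enum demo_policy policy,
                                     const char **list, int n)
    {
        bool found = false;
        int i;

        for (i = 0; i < n; i++)
            if (!strcmp(demo_get_ext(path), list[i]))
                found = true;
        return found ^ (policy == DEMO_ALLOW);
    }

    int main(void)
    {
        const char *exts[] = { "jpg", "mp4" };

        /* -x jpg -x mp4: everything except jpg/mp4 is compressed */
        printf("%d %d\n", demo_should_compress("a.txt", DEMO_ALLOW, exts, 2),
                          demo_should_compress("a.jpg", DEMO_ALLOW, exts, 2));
        /* -i jpg -i mp4: only jpg/mp4 are compressed */
        printf("%d %d\n", demo_should_compress("a.txt", DEMO_DENY, exts, 2),
                          demo_should_compress("a.jpg", DEMO_DENY, exts, 2));
        return 0;   /* prints: 1 0, then 0 1 */
    }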

fsck/compress.h (new file, 22 lines)

@ -0,0 +1,22 @@
/**
* compress.h
*
* Copyright (c) 2020 Google Inc.
* Robin Hsu <robinhsu@google.com>
* : add sload compression support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef COMPRESS_H
#define COMPRESS_H
#include "f2fs_fs.h"
extern const char *supported_comp_names[];
extern compress_ops supported_comp_ops[];
extern filter_ops ext_filter;
#endif /* COMPRESS_H */

fsck/fsck.h

@ -282,7 +282,16 @@ block_t new_node_block(struct f2fs_sb_info *,
struct quota_file;
u64 f2fs_quota_size(struct quota_file *);
u64 f2fs_read(struct f2fs_sb_info *, nid_t, u8 *, u64, pgoff_t);
enum wr_addr_type {
WR_NORMAL = 1,
WR_COMPRESS_DATA = 2,
WR_NULL_ADDR = NULL_ADDR, /* 0 */
WR_NEW_ADDR = NEW_ADDR, /* -1U */
WR_COMPRESS_ADDR = COMPRESS_ADDR, /* -2U */
};
u64 f2fs_write(struct f2fs_sb_info *, nid_t, u8 *, u64, pgoff_t);
u64 f2fs_write_compress_data(struct f2fs_sb_info *, nid_t, u8 *, u64, pgoff_t);
u64 f2fs_write_addrtag(struct f2fs_sb_info *, nid_t, pgoff_t, unsigned int);
void f2fs_filesize_update(struct f2fs_sb_info *, nid_t, u64);
int get_dnode_of_data(struct f2fs_sb_info *, struct dnode_of_data *,

fsck/main.c

@ -13,6 +13,9 @@
* Copyright (c) 2019 Google Inc.
* Robin Hsu <robinhsu@google.com>
* : add cache layer
* Copyright (c) 2020 Google Inc.
* Robin Hsu <robinhsu@google.com>
* : add sload compression support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@ -25,6 +28,7 @@
#include <getopt.h>
#include <stdbool.h>
#include "quotaio.h"
#include "compress.h"
struct f2fs_fsck gfsck;
@ -134,6 +138,17 @@ void sload_usage()
MSG(0, " -S sparse_mode\n");
MSG(0, " -t mount point [prefix of target fs path, default:/]\n");
MSG(0, " -T timestamp\n");
MSG(0, " -c enable compression (default allow policy)\n");
MSG(0, " ------------ Compression sub-options -----------------\n");
MSG(0, " -L <log-of-blocks-per-cluster>, default 2\n");
MSG(0, " -a <algorithm> compression algorithm, default LZ4\n");
MSG(0, " -x <ext> compress files except for these extensions.\n");
MSG(0, " -i <ext> compress files with these extensions only.\n");
MSG(0, " * -i or -x: use it many times for multiple extensions.\n");
MSG(0, " * -i and -x cannot be used together..\n");
MSG(0, " -m <num> min compressed blocks per cluster\n");
MSG(0, " -r readonly (IMMUTABLE) for compressed files\n");
MSG(0, " ------------------------------------------------------\n");
MSG(0, " -d debug level [default:0]\n");
MSG(0, " -V print the version number and exit\n");
exit(1);
@ -534,7 +549,7 @@ void f2fs_parse_options(int argc, char *argv[])
#endif
} else if (!strcmp("sload.f2fs", prog)) {
#ifdef WITH_SLOAD
const char *option_string = "C:d:f:p:s:St:T:V";
const char *option_string = "cL:a:i:x:m:rC:d:f:p:s:St:T:V";
#ifdef HAVE_LIBSELINUX
int max_nr_opt = (int)sizeof(c.seopt_file) /
sizeof(c.seopt_file[0]);
@ -543,8 +558,83 @@ void f2fs_parse_options(int argc, char *argv[])
char *p;
c.func = SLOAD;
c.compress.cc.log_cluster_size = 2;
c.compress.alg = COMPR_LZ4;
c.compress.min_blocks = 1;
c.compress.filter_ops = &ext_filter;
while ((option = getopt(argc, argv, option_string)) != EOF) {
unsigned int i;
int val;
switch (option) {
case 'c': /* compression support */
c.compress.enabled = true;
break;
case 'L': /* compression: log of blocks-per-cluster */
c.compress.required = true;
val = atoi(optarg);
if (val < MIN_COMPRESS_LOG_SIZE ||
val > MAX_COMPRESS_LOG_SIZE) {
MSG(0, "\tError: log of blocks per"
" cluster must be in the range"
" of %d .. %d.\n",
MIN_COMPRESS_LOG_SIZE,
MAX_COMPRESS_LOG_SIZE);
error_out(prog);
}
c.compress.cc.log_cluster_size = val;
break;
case 'a': /* compression: choose algorithm */
c.compress.required = true;
c.compress.alg = MAX_COMPRESS_ALGS;
for (i = 0; i < MAX_COMPRESS_ALGS; i++) {
if (!strcmp(supported_comp_names[i],
optarg)) {
c.compress.alg = i;
break;
}
}
if (c.compress.alg == MAX_COMPRESS_ALGS) {
MSG(0, "\tError: Unknown compression"
" algorithm %s\n", optarg);
error_out(prog);
}
break;
case 'i': /* compress only these extensions */
c.compress.required = true;
if (c.compress.filter == COMPR_FILTER_ALLOW) {
MSG(0, "\tError: could not mix option"
" -i and -x\n");
error_out(prog);
}
c.compress.filter = COMPR_FILTER_DENY;
c.compress.filter_ops->add(optarg);
break;
case 'x': /* compress except for these extensions */
c.compress.required = true;
if (c.compress.filter == COMPR_FILTER_DENY) {
MSG(0, "\tError: could not mix option"
" -i and -x\n");
error_out(prog);
}
c.compress.filter = COMPR_FILTER_ALLOW;
c.compress.filter_ops->add(optarg);
break;
case 'm': /* minimum compressed blocks per cluster */
c.compress.required = true;
val = atoi(optarg);
if (val <= 0) {
MSG(0, "\tError: minimum compressed"
" blocks per cluster must be"
" positive.\n");
error_out(prog);
}
c.compress.min_blocks = val;
break;
case 'r': /* compress file to set IMMUTABLE */
c.compress.required = true;
c.compress.readonly = true;
break;
case 'C':
c.fs_config_file = absolute_path(optarg);
break;
@ -602,6 +692,27 @@ void f2fs_parse_options(int argc, char *argv[])
if (err != NOERROR)
break;
}
if (c.compress.required && !c.compress.enabled) {
MSG(0, "\tError: compression sub-options are used"
" without the compression enable (-c) option\n"
);
error_out(prog);
}
if (err == NOERROR && c.compress.enabled) {
c.compress.cc.cluster_size = 1
<< c.compress.cc.log_cluster_size;
if (c.compress.filter == COMPR_FILTER_UNASSIGNED)
c.compress.filter = COMPR_FILTER_ALLOW;
if (c.compress.min_blocks >=
c.compress.cc.cluster_size) {
MSG(0, "\tError: minimum reduced blocks by"
" compression per cluster must be at"
" most one less than blocks per"
" cluster, i.e. %d\n",
c.compress.cc.cluster_size - 1);
error_out(prog);
}
}
#endif /* WITH_SLOAD */
}
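
For illustration only (not part of the diff; demo_* names are hypothetical),
the option constraints enforced above boil down to: -L gives log2 of the
blocks per cluster (2..8), and -m must still leave at least one block of
compressed payload, i.e. 1 <= min_blocks <= cluster_size - 1.

    #include <stdio.h>

    #define DEMO_MIN_COMPRESS_LOG_SIZE 2   /* 4 blocks per cluster */
    #define DEMO_MAX_COMPRESS_LOG_SIZE 8   /* 256 blocks per cluster */

    /* returns 0 when the (-L, -m) pair would be accepted, -1 otherwise */
    static int demo_check_compress_opts(int log_cluster_size, int min_blocks)
    {
        int cluster_size;

        if (log_cluster_size < DEMO_MIN_COMPRESS_LOG_SIZE ||
            log_cluster_size > DEMO_MAX_COMPRESS_LOG_SIZE)
            return -1;
        cluster_size = 1 << log_cluster_size;
        /* must save at least one block yet keep at least one data block */
        if (min_blocks < 1 || min_blocks >= cluster_size)
            return -1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", demo_check_compress_opts(2, 1)); /*  0: the defaults */
        printf("%d\n", demo_check_compress_opts(2, 3)); /*  0: accepted     */
        printf("%d\n", demo_check_compress_opts(2, 4)); /* -1: -m too large */
        printf("%d\n", demo_check_compress_opts(9, 1)); /* -1: -L too large */
        return 0;
    }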
@ -812,6 +923,30 @@ static int do_resize(struct f2fs_sb_info *sbi)
#endif
#ifdef WITH_SLOAD
static int init_compr(struct f2fs_sb_info *sbi)
{
if (!c.compress.enabled)
return 0;
if (!(sbi->raw_super->feature
& cpu_to_le32(F2FS_FEATURE_COMPRESSION))) {
MSG(0, "Error: Compression (-c) was requested "
"but the file system is not created "
"with such feature.\n");
return -1;
}
if (!supported_comp_ops[c.compress.alg].init) {
MSG(0, "Error: The selected compression algorithm is not"
" supported\n");
return -1;
}
c.compress.ops = supported_comp_ops + c.compress.alg;
c.compress.ops->init(&c.compress.cc);
c.compress.ops->reset(&c.compress.cc);
c.compress.cc.rlen = c.compress.cc.cluster_size * F2FS_BLKSIZE;
return 0;
}
static int do_sload(struct f2fs_sb_info *sbi)
{
if (!c.from_dir) {
@ -821,6 +956,9 @@ static int do_sload(struct f2fs_sb_info *sbi)
if (!c.mount_point)
c.mount_point = "/";
if (init_compr(sbi))
return -1;
return f2fs_sload(sbi);
}
#endif
@ -971,6 +1109,9 @@ retry:
return ret2;
}
if (c.func == SLOAD)
c.compress.filter_ops->destroy();
printf("\nDone: %lf secs\n", (get_boottime_ns() - start) / 1000000000.0);
return ret;

fsck/segment.c

@ -8,6 +8,9 @@
* Hou Pengyang <houpengyang@huawei.com>
* Liu Shuoran <liushuoran@huawei.com>
* Jaegeuk Kim <jaegeuk@kernel.org>
* Copyright (c) 2020 Google Inc.
* Robin Hsu <robinhsu@google.com>
* : add sload compression support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@ -111,6 +114,8 @@ int new_data_block(struct f2fs_sb_info *sbi, void *block,
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
dn->data_blkaddr = blkaddr;
ret = reserve_new_block(sbi, &dn->data_blkaddr, &sum, type, 0);
if (ret) {
c.alloc_failed = 1;
@ -228,8 +233,14 @@ u64 f2fs_read(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
return read_count;
}
u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
u64 count, pgoff_t offset)
/*
* Do not call this function directly. Instead, call one of the following:
* u64 f2fs_write();
* u64 f2fs_write_compress_data();
* u64 f2fs_write_addrtag();
*/
static u64 f2fs_write_ex(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
u64 count, pgoff_t offset, enum wr_addr_type addr_type)
{
struct dnode_of_data dn;
struct node_info ni;
@ -243,6 +254,19 @@ u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
void* index_node = NULL;
int idirty = 0;
int err;
bool has_data = (addr_type == WR_NORMAL
|| addr_type == WR_COMPRESS_DATA);
if (count == 0)
return 0;
/*
* Enforce calling from f2fs_write(), f2fs_write_compress_data(),
* and f2fs_write_addrtag(). Beside, check if is properly called.
*/
ASSERT((!has_data && buffer == NULL) || (has_data && buffer != NULL));
if (addr_type != WR_NORMAL)
ASSERT(offset % F2FS_BLKSIZE == 0); /* block boundary only */
/* Memory allocation for block buffer and inode. */
blk_buffer = calloc(BLOCK_SZ, 2);
@ -265,15 +289,26 @@ u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
if (err)
break;
idirty |= dn.idirty;
if (index_node)
free(index_node);
free(index_node);
index_node = (dn.node_blk == dn.inode_blk) ?
NULL : dn.node_blk;
NULL : dn.node_blk;
remained_blkentries = ADDRS_PER_PAGE(sbi,
dn.node_blk, dn.inode_blk);
dn.node_blk, dn.inode_blk) -
dn.ofs_in_node;
}
ASSERT(remained_blkentries > 0);
if (!has_data) {
dn.data_blkaddr = addr_type;
set_data_blkaddr(&dn);
idirty |= dn.idirty;
if (dn.ndirty)
ASSERT(dev_write_block(dn.node_blk,
dn.node_blkaddr) >= 0);
written_count = 0;
break;
}
blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
err = new_data_block(sbi, blk_buffer,
@ -281,6 +316,7 @@ u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
if (err)
break;
blkaddr = dn.data_blkaddr;
idirty |= dn.idirty;
}
off_in_blk = offset % BLOCK_SZ;
@ -305,9 +341,10 @@ u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
dn.ofs_in_node++;
if ((--remained_blkentries == 0 || count == 0) && (dn.ndirty))
ASSERT(dev_write_block(dn.node_blk, dn.node_blkaddr) >= 0);
ASSERT(dev_write_block(dn.node_blk, dn.node_blkaddr)
>= 0);
}
if (offset > le64_to_cpu(inode->i.i_size)) {
if (addr_type == WR_NORMAL && offset > le64_to_cpu(inode->i.i_size)) {
inode->i.i_size = cpu_to_le64(offset);
idirty = 1;
}
@ -315,13 +352,33 @@ u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
ASSERT(inode == dn.inode_blk);
ASSERT(write_inode(inode, ni.blk_addr) >= 0);
}
if (index_node)
free(index_node);
free(index_node);
free(blk_buffer);
return written_count;
}
u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
u64 count, pgoff_t offset)
{
return f2fs_write_ex(sbi, ino, buffer, count, offset, WR_NORMAL);
}
u64 f2fs_write_compress_data(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
u64 count, pgoff_t offset)
{
return f2fs_write_ex(sbi, ino, buffer, count, offset, WR_COMPRESS_DATA);
}
u64 f2fs_write_addrtag(struct f2fs_sb_info *sbi, nid_t ino, pgoff_t offset,
unsigned int addrtag)
{
ASSERT(addrtag == COMPRESS_ADDR || addrtag == NEW_ADDR
|| addrtag == NULL_ADDR);
return f2fs_write_ex(sbi, ino, NULL, F2FS_BLKSIZE, offset, addrtag);
}
/* This function updates only inode->i.i_size */
void f2fs_filesize_update(struct f2fs_sb_info *sbi, nid_t ino, u64 filesize)
{
@ -342,11 +399,59 @@ void f2fs_filesize_update(struct f2fs_sb_info *sbi, nid_t ino, u64 filesize)
free(inode);
}
#define MAX_BULKR_RETRY 5
int bulkread(int fd, void *rbuf, size_t rsize, bool *eof)
{
int n = 0;
int retry = MAX_BULKR_RETRY;
int cur;
if (!rsize)
return 0;
if (eof != NULL)
*eof = false;
while (rsize && (cur = read(fd, rbuf, rsize)) != 0) {
if (cur == -1) {
if (errno == EINTR && retry--)
continue;
return -1;
}
retry = MAX_BULKR_RETRY;
rsize -= cur;
n += cur;
}
if (eof != NULL)
*eof = (cur == 0);
return n;
}
u64 f2fs_fix_mutable(struct f2fs_sb_info *sbi, nid_t ino, pgoff_t offset,
unsigned int compressed)
{
unsigned int i;
u64 wlen;
if (c.compress.readonly)
return 0;
for (i = 0; i < compressed - 1; i++) {
wlen = f2fs_write_addrtag(sbi, ino,
offset + (i << F2FS_BLKSIZE_BITS), NEW_ADDR);
if (wlen)
return wlen;
}
return 0;
}
int f2fs_build_file(struct f2fs_sb_info *sbi, struct dentry *de)
{
int fd, n;
pgoff_t off = 0;
u8 buffer[BLOCK_SZ];
struct node_info ni;
struct f2fs_node *node_blk;
if (de->ino == 0)
return -1;
@ -359,8 +464,6 @@ int f2fs_build_file(struct f2fs_sb_info *sbi, struct dentry *de)
/* inline_data support */
if (de->size <= DEF_MAX_INLINE_DATA) {
struct node_info ni;
struct f2fs_node *node_blk;
int ret;
get_node_info(sbi, de->ino, &ni);
@ -385,6 +488,86 @@ int f2fs_build_file(struct f2fs_sb_info *sbi, struct dentry *de)
node_blk->i.i_size = cpu_to_le64(de->size);
ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
free(node_blk);
#ifdef WITH_SLOAD
} else if (c.func == SLOAD && c.compress.enabled &&
c.compress.filter_ops->filter(de->full_path)) {
bool eof = false;
u8 *rbuf = c.compress.cc.rbuf;
unsigned int cblocks = 0;
node_blk = calloc(BLOCK_SZ, 1);
ASSERT(node_blk);
/* read inode */
get_node_info(sbi, de->ino, &ni);
ASSERT(dev_read_block(node_blk, ni.blk_addr) >= 0);
/* update inode meta */
node_blk->i.i_compress_algrithm = c.compress.alg;
node_blk->i.i_log_cluster_size =
c.compress.cc.log_cluster_size;
node_blk->i.i_flags = cpu_to_le32(
F2FS_COMPR_FL |
(c.compress.readonly ? FS_IMMUTABLE_FL : 0));
ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
while (!eof && (n = bulkread(fd, rbuf, c.compress.cc.rlen,
&eof)) > 0) {
int ret = c.compress.ops->compress(&c.compress.cc);
u64 wlen;
u32 csize = ALIGN_UP(c.compress.cc.clen +
COMPRESS_HEADER_SIZE, BLOCK_SZ);
unsigned int cur_cblk;
if (ret || n < c.compress.cc.rlen ||
n < (int)(csize + BLOCK_SZ *
c.compress.min_blocks)) {
wlen = f2fs_write(sbi, de->ino, rbuf, n, off);
ASSERT((int)wlen == n);
} else {
wlen = f2fs_write_addrtag(sbi, de->ino, off,
WR_COMPRESS_ADDR);
ASSERT(!wlen);
wlen = f2fs_write_compress_data(sbi, de->ino,
(u8 *)c.compress.cc.cbuf,
csize, off + BLOCK_SZ);
ASSERT(wlen == csize);
c.compress.ops->reset(&c.compress.cc);
cur_cblk = (c.compress.cc.rlen - csize) /
BLOCK_SZ;
cblocks += cur_cblk;
wlen = f2fs_fix_mutable(sbi, de->ino,
off + BLOCK_SZ + csize,
cur_cblk);
ASSERT(!wlen);
}
off += n;
}
if (n == -1) {
fprintf(stderr, "Load file '%s' failed: ",
de->full_path);
perror(NULL);
}
/* read inode */
get_node_info(sbi, de->ino, &ni);
ASSERT(dev_read_block(node_blk, ni.blk_addr) >= 0);
/* update inode meta */
node_blk->i.i_size = cpu_to_le64(off);
if (!c.compress.readonly) {
node_blk->i.i_compr_blocks = cpu_to_le64(cblocks);
node_blk->i.i_blocks += cpu_to_le64(cblocks);
}
ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
free(node_blk);
if (!c.compress.readonly) {
sbi->total_valid_block_count += cblocks;
if (sbi->total_valid_block_count >=
sbi->user_block_count) {
ERR_MSG("Not enough space\n");
ASSERT(0);
}
}
#endif
} else {
while ((n = read(fd, buffer, BLOCK_SZ)) > 0) {
f2fs_write(sbi, de->ino, buffer, n, off);
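
For illustration only (not part of the diff; DEMO_/demo_ names are
hypothetical and the 24-byte header size is assumed from struct
compress_data), the per-cluster decision taken by the loop above can be
summarized as: a cluster is stored compressed only if the whole cluster was
read, compression succeeded, and the compressed payload rounded up to block
size still saves at least min_blocks blocks; otherwise it is written raw.

    #include <stdio.h>

    #define DEMO_BLKSIZE          4096
    #define DEMO_COMPRESS_HEADER  24   /* clen + chksum + reserved[4] */
    #define DEMO_ALIGN_UP(v, s)   ((v) + ((v) % (s) ? (s) - (v) % (s) : 0))

    /* returns the number of blocks released for a compressed cluster, or 0
     * when the cluster has to be written raw (mirrors the
     * "ret || n < rlen || n < csize + BLOCK_SZ * min_blocks" test) */
    static unsigned int demo_cluster_saving(size_t rlen, size_t n, size_t clen,
                                            int compress_failed,
                                            unsigned int min_blocks)
    {
        size_t csize = DEMO_ALIGN_UP(clen + DEMO_COMPRESS_HEADER,
                                     DEMO_BLKSIZE);

        if (compress_failed || n < rlen ||
            n < csize + DEMO_BLKSIZE * min_blocks)
            return 0;                          /* stored as raw data */
        return (rlen - csize) / DEMO_BLKSIZE;  /* i_compr_blocks delta */
    }

    int main(void)
    {
        /* 16 KiB cluster compressed to 6000 bytes: payload rounds up to
         * 8192 (2 blocks), so 2 blocks are saved */
        printf("%u\n", demo_cluster_saving(16384, 16384, 6000, 0, 1));  /* 2 */
        /* barely compressed to 13000 bytes: no block saved, kept raw */
        printf("%u\n", demo_cluster_saving(16384, 16384, 13000, 0, 1)); /* 0 */
        return 0;
    }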

include/f2fs_fs.h

@ -5,6 +5,9 @@
* http://www.samsung.com/
* Copyright (c) 2019 Google Inc.
* http://www.google.com/
* Copyright (c) 2020 Google Inc.
* Robin Hsu <robinhsu@google.com>
* : add sload compression support
*
* Dual licensed under the GPL or LGPL version 2 licenses.
*
@ -68,6 +71,10 @@ typedef uint16_t u_int16_t;
typedef uint8_t u_int8_t;
#endif
/* codes from kernel's f2fs.h, GPL-v2.0 */
#define MIN_COMPRESS_LOG_SIZE 2
#define MAX_COMPRESS_LOG_SIZE 8
typedef u_int64_t u64;
typedef u_int32_t u32;
typedef u_int16_t u16;
@ -93,6 +100,31 @@ typedef u32 __be32;
typedef u64 __be64;
#endif
/*
* code borrowed from kernel f2fs driver: f2fs.h, GPL-2.0
* : definitions of COMPRESS_DATA_RESERVED_SIZE,
* struct compress_data, COMPRESS_HEADER_SIZE,
* and struct compress_ctx
*/
#define COMPRESS_DATA_RESERVED_SIZE 4
struct compress_data {
__le32 clen; /* compressed data size */
__le32 chksum; /* checksum of compressed data */
__le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */
u8 cdata[]; /* compressed data */
};
#define COMPRESS_HEADER_SIZE (sizeof(struct compress_data))
/* compress context */
struct compress_ctx {
unsigned int cluster_size; /* page count in cluster */
unsigned int log_cluster_size; /* log of cluster size */
void *rbuf; /* compression input buffer */
struct compress_data *cbuf; /* compression output header + data */
size_t rlen; /* valid data length in rbuf */
size_t clen; /* valid data length in cbuf */
void *private; /* work buf for compress algorithm */
};
#if HAVE_BYTESWAP_H
#include <byteswap.h>
#else
@ -345,6 +377,47 @@ typedef struct {
bool dbg_en;
} dev_cache_config_t;
/* f2fs_configration for compression used for sload.f2fs */
typedef struct {
void (*init)(struct compress_ctx *cc);
int (*compress)(struct compress_ctx *cc);
void (*reset)(struct compress_ctx *cc);
} compress_ops;
/* Should be aligned to supported_comp_names and support_comp_ops */
enum compress_algorithms {
COMPR_LZO,
COMPR_LZ4,
MAX_COMPRESS_ALGS,
};
enum filter_policy {
COMPR_FILTER_UNASSIGNED = 0,
COMPR_FILTER_ALLOW,
COMPR_FILTER_DENY,
};
typedef struct {
void (*add)(const char *);
void (*destroy)(void);
bool (*filter)(const char *);
} filter_ops;
typedef struct {
bool enabled; /* disabled by default */
bool required; /* require to enable */
bool readonly; /* readonly to release blocks */
struct compress_ctx cc; /* work context */
enum compress_algorithms alg; /* algorithm to compress */
compress_ops *ops; /* ops per algorithm */
unsigned int min_blocks; /* save more blocks than this */
enum filter_policy filter; /* filter to try compression */
filter_ops *filter_ops; /* filter ops */
} compress_config_t;
#define ALIGN_UP(value, size) ((value) + ((value) % (size) > 0 ? \
(size) - (value) % (size) : 0))
struct f2fs_configuration {
u_int32_t reserved_segments;
u_int32_t new_reserved_segments;
@ -441,6 +514,9 @@ struct f2fs_configuration {
/* cache parameters */
dev_cache_config_t cache_config;
/* compression support for sload.f2fs */
compress_config_t compress;
};
#ifdef CONFIG_64BIT
@ -1377,7 +1453,7 @@ int f2fs_reset_zone(int, void *);
extern int f2fs_reset_zones(int);
extern uint32_t f2fs_get_usable_segments(struct f2fs_super_block *sb);
#define SIZE_ALIGN(val, size) ((val) + (size) - 1) / (size)
#define SIZE_ALIGN(val, size) (((val) + (size) - 1) / (size))
#define SEG_ALIGN(blks) SIZE_ALIGN(blks, c.blks_per_seg)
#define ZONE_ALIGN(blks) SIZE_ALIGN(blks, c.blks_per_seg * \
c.segs_per_zone)
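
For illustration only (not part of the diff; the DEMO_/demo_ copies below
just mirror the definitions above), the new on-disk header is 24 bytes,
ALIGN_UP() rounds the compressed payload up to a block boundary, and the
extra parentheses added to SIZE_ALIGN() matter once the macro is embedded in
a larger expression.

    #include <stdint.h>
    #include <stdio.h>

    /* layout mirror of struct compress_data (fields are little-endian on
     * disk; the flexible cdata[] member adds no size) */
    struct demo_compress_data {
        uint32_t clen;
        uint32_t chksum;
        uint32_t reserved[4];
        uint8_t  cdata[];
    };
    #define DEMO_COMPRESS_HEADER_SIZE (sizeof(struct demo_compress_data))

    #define DEMO_ALIGN_UP(value, size) ((value) + ((value) % (size) > 0 ? \
                                        (size) - (value) % (size) : 0))
    /* the parenthesized form from the patch; without the outer parentheses,
     * 8 * DEMO_SIZE_ALIGN(x, y) would mis-group the integer division */
    #define DEMO_SIZE_ALIGN(val, size) (((val) + (size) - 1) / (size))

    int main(void)
    {
        printf("%zu\n", DEMO_COMPRESS_HEADER_SIZE);      /* 24   */
        printf("%d\n", DEMO_ALIGN_UP(6000 + 24, 4096));  /* 8192 */
        printf("%d\n", 8 * DEMO_SIZE_ALIGN(2, 4));       /* 8    */
        return 0;
    }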

lib/libf2fs_io.c

@ -5,6 +5,9 @@
* http://www.samsung.com/
* Copyright (c) 2019 Google Inc.
* http://www.google.com/
* Copyright (c) 2020 Google Inc.
* Robin Hsu <robinhsu@google.com>
* : add quick-buffer for sload compression support
*
* Dual licensed under the GPL or LGPL version 2 licenses.
*/

man/sload.f2fs.8

@ -7,22 +7,48 @@ sload.f2fs \- load directories and files into the device directly
.B sload.f2fs
[
.B \-f
.I source directory path
.I source-directory-path
]
[
.B \-t
.I mount point
.I mount-point
]
[
.B \-d
.I debugging-level
]
[
.B \-c
[
.B \-L
.I log-of-blocks-per-cluster
]
[
.B \-a
.I compression-algorithm
]
[
.B \-x
.I file-extension-to-exclude-from-compression
|
.B \-i
.I file-extension-to-include-for-compression
]
[
.B \-m
.I minimum-compressed-blocks-per-cluster
]
[
.B \-r
]
]
.I device
.SH DESCRIPTION
.B sload.f2fs
is used to load directories and files into a disk partition.
\fIdevice\fP is the special file corresponding to the device (e.g.
\fI/dev/sdXX\fP).
is used to load directories and files into a disk partition or an F2FS
image file.
\fIdevice\fP can be either a special file corresponding to the device (e.g.
\fI/dev/sdXX\fP) or an F2FS image file.
.PP
The exit code returned by
@ -30,24 +56,72 @@ The exit code returned by
is 0 on success and -1 on failure.
.SH OPTIONS
.TP
.BI \-f " source directory path"
.BI \-f " source-directory-path"
Specify the source directory path to be loaded.
.TP
.BI \-t " mount point path"
.BI \-t " mount-point-path"
Specify the mount point path in the partition to load.
.TP
.BI \-d " debug-level"
Specify the level of debugging options.
The default number is 0, which shows basic debugging messages.
.TP
.BI \-c
Enable cluster-based file compression.
Each file is split into clusters, and each cluster is compressed
independently.
.TP
.BI \-L " log-of-blocks-per-cluster"
Specify cluster size in power of two blocks.
The minimum value is 2 (4 blocks, default).
The maximum value is 8 (256 blocks).
Note that a block contains 4096 bytes.
This option must be used with option \fB\-c\fR.
.TP
.BI \-a " compression-algorithm"
Choose the algorithm for compression. Available options are:
lzo, lz4 (default).
This option must be used with option \fB\-c\fR.
.TP
.BI \-i " file-extension-to-include-for-compression"
Specify a file extension to include for the compression.
To specify multiple file extensions, use multiple option \fB\-i\fR's.
Files having one of the listed extensions will be compressed.
This option must be used with option \fB\-c\fR.
.TP
.BI \-x " file-extension-to-exclude-from-compression"
Specify a file extension to exclude from compression.
To specify multiple file extensions, use multiple option \fB\-x\fR's.
Files having one of the listed extensions won't be compressed.
This option must be used with option \fB\-c\fR.
.TP
.BI \-m " minimum-compressed-blocks-per-cluster"
Specify a minimum block count saved (by compression) per cluster.
The minimum value is 1 (default).
Maximum value is the cluster size in blocks minus 1.
If compression of a cluster fails to save at least the minimum compressed
block count given by the option, the cluster will not be compressed.
This option must be used with option \fB\-c\fR.
.TP
.BI \-r
Specify read-only flag for the compressed files.
This allows the filesystem to release the space saved by compression to the
user; without this option, the filesystem must keep that space reserved for
future file updates.
This option must be used with option \fB\-c\fR.
.SH NOTES
If neither \fB\-i\fR nor \fB\-x\fR is used, all files will be compressed.
Options \fB\-i\fR and \fB\-x\fR cannot be used together.
.SH AUTHOR
This version of
.B sload.f2fs
has been written by Hou Pengyang <houpengyang@huawei.com>,
Liu Shuoran <liushuoran@huawei.com>, Jaegeuk Kim <jaegeuk@kernel.org>
has been contributed by Hou Pengyang <houpengyang@huawei.com>,
Liu Shuoran <liushuoran@huawei.com>, Jaegeuk Kim <jaegeuk@kernel.org>, and
Robin Hsu <robinhsu@google.com>.
.SH AVAILABILITY
.B sload.f2fs
is available from git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs-tools.git.
is available from <git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs-tools.git>.
.SH SEE ALSO
.BR mkfs.f2fs(8),
.BR fsck.f2fs(8),