/**
 * libf2fs_io.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 * Copyright (c) 2019 Google Inc.
 *             http://www.google.com/
 * Copyright (c) 2020 Google Inc.
 *   Robin Hsu <robinhsu@google.com>
 *  : add quick-buffer for sload compression support
 *
 * Dual licensed under the GPL or LGPL version 2 licenses.
 */
#ifndef _LARGEFILE64_SOURCE
#define _LARGEFILE64_SOURCE
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#ifdef HAVE_MNTENT_H
#include <mntent.h>
#endif
#include <time.h>
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_LINUX_HDREG_H
#include <linux/hdreg.h>
#endif

#include <stdbool.h>
#include <assert.h>
#include <inttypes.h>
#include "f2fs_fs.h"

struct f2fs_configuration c;

#ifdef HAVE_SPARSE_SPARSE_H
#include <sparse/sparse.h>

struct sparse_file *f2fs_sparse_file;
static char **blocks;
uint64_t blocks_count;
static char *zeroed_block;
#endif

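/*
 * Map a byte offset within the whole f2fs image to the file descriptor of
 * the device that backs it, and rebase *offset so it becomes relative to
 * that device.  Returns -1 if the offset is outside every device range.
 */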
static int __get_device_fd(__u64 *offset)
{
	__u64 blk_addr = *offset >> F2FS_BLKSIZE_BITS;
	int i;

	for (i = 0; i < c.ndevs; i++) {
		if (c.devices[i].start_blkaddr <= blk_addr &&
				c.devices[i].end_blkaddr >= blk_addr) {
			*offset -=
				c.devices[i].start_blkaddr << F2FS_BLKSIZE_BITS;
			return c.devices[i].fd;
		}
	}
	return -1;
}

#ifndef HAVE_LSEEK64
typedef off_t	off64_t;

static inline off64_t lseek64(int fd, __u64 offset, int set)
{
	return lseek(fd, offset, set);
}
#endif

/* ---------- dev_cache, Least Used First (LUF) policy ------------------- */
/*
 * The least recently used block is the first victim to be replaced once the
 * maximum number of hash collisions is exceeded.
 */
static bool *dcache_valid;		/* is the cached block valid? */
static off64_t *dcache_blk;		/* which block it cached */
static uint64_t *dcache_lastused;	/* last used ticks for cache entries */
static char *dcache_buf;		/* cached block data */
static uint64_t dcache_usetick;		/* current use tick */

static uint64_t dcache_raccess;
static uint64_t dcache_rhit;
static uint64_t dcache_rmiss;
static uint64_t dcache_rreplace;

static bool dcache_exit_registered = false;

/*
 * Shadow config:
 *
 * Active set of the configuration.  The global configuration
 * 'c.cache_config' is copied here when dcache_init() is called.
 */
static dev_cache_config_t dcache_config = {0, 16, 1};
static bool dcache_initialized = false;

#define MIN_NUM_CACHE_ENTRY	1024L
#define MAX_MAX_HASH_COLLISION	16

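/*
 * Candidate probe distances tried, in order, when the hashed slot is
 * occupied.  dcache_relocate_init() rebases them against the configured
 * number of cache entries so dcache_relocate() can wrap them with a simple
 * modulo.
 */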
static long dcache_relocate_offset0[] = {
	20, -20, 40, -40, 80, -80, 160, -160,
	320, -320, 640, -640, 1280, -1280, 2560, -2560,
};
static int dcache_relocate_offset[16];

static void dcache_print_statistics(void)
{
	long i;
	long useCnt;

	/* Number of used cache entries */
	useCnt = 0;
	for (i = 0; i < dcache_config.num_cache_entry; i++)
		if (dcache_valid[i])
			++useCnt;

	/*
	 *  c: number of cache entries
	 *  u: used entries
	 *  RA: number of read access blocks
	 *  CH: cache hit
	 *  CM: cache miss
	 *  Repl: read cache replaced
	 */
	printf("\nc, u, RA, CH, CM, Repl=\n");
	printf("%ld %ld %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			dcache_config.num_cache_entry,
			useCnt,
			dcache_raccess,
			dcache_rhit,
			dcache_rmiss,
			dcache_rreplace);
}

void dcache_release(void)
{
	if (!dcache_initialized)
		return;

	dcache_initialized = false;

	if (c.cache_config.dbg_en)
		dcache_print_statistics();

	if (dcache_blk != NULL)
		free(dcache_blk);
	if (dcache_lastused != NULL)
		free(dcache_lastused);
	if (dcache_buf != NULL)
		free(dcache_buf);
	if (dcache_valid != NULL)
		free(dcache_valid);
	dcache_config.num_cache_entry = 0;
	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;
}

/* return 0 for success, -1 for failure */
static int dcache_alloc_all(long n)
{
	if (n <= 0)
		return -1;
	if ((dcache_blk = (off64_t *) malloc(sizeof(off64_t) * n)) == NULL
		|| (dcache_lastused = (uint64_t *)
				malloc(sizeof(uint64_t) * n)) == NULL
		|| (dcache_buf = (char *) malloc(F2FS_BLKSIZE * n)) == NULL
		|| (dcache_valid = (bool *) malloc(sizeof(bool) * n)) == NULL)
	{
		dcache_release();
		return -1;
	}
	dcache_config.num_cache_entry = n;
	return 0;
}

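/*
 * Precompute the collision-probe table and clamp max_hash_collision so that
 * no probe distance exceeds half of the cache size.
 */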
static void dcache_relocate_init(void)
{
	int i;
	int n0 = (sizeof(dcache_relocate_offset0)
			/ sizeof(dcache_relocate_offset0[0]));
	int n = (sizeof(dcache_relocate_offset)
			/ sizeof(dcache_relocate_offset[0]));

	ASSERT(n == n0);
	for (i = 0; i < n && i < dcache_config.max_hash_collision; i++) {
		if (labs(dcache_relocate_offset0[i])
				> dcache_config.num_cache_entry / 2) {
			dcache_config.max_hash_collision = i;
			break;
		}
		dcache_relocate_offset[i] =
				dcache_config.num_cache_entry
				+ dcache_relocate_offset0[i];
	}
}

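/*
 * Set up the user-space block cache from c.cache_config.  Safe to call
 * again: any previous cache is released first.  A caller would typically
 * set the knobs before the first read, e.g. (illustrative values only):
 *
 *	c.cache_config.num_cache_entry = 4096;
 *	c.cache_config.max_hash_collision = 8;
 *	c.cache_config.dbg_en = false;
 *	dcache_init();
 */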
void dcache_init(void)
{
	long n;

	if (c.cache_config.num_cache_entry <= 0)
		return;

	/* release previous cache init, if any */
	dcache_release();

	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;

	dcache_config = c.cache_config;

	n = max(MIN_NUM_CACHE_ENTRY, dcache_config.num_cache_entry);

	/* halve alloc size until alloc succeeds, or min cache size is reached */
	while (dcache_alloc_all(n) != 0 && n != MIN_NUM_CACHE_ENTRY)
		n = max(MIN_NUM_CACHE_ENTRY, n / 2);

	/* must be the last: data dependent on num_cache_entry */
	dcache_relocate_init();
	dcache_initialized = true;

	if (!dcache_exit_registered) {
		dcache_exit_registered = true;
		atexit(dcache_release); /* auto release */
	}

	dcache_raccess = 0;
	dcache_rhit = 0;
	dcache_rmiss = 0;
	dcache_rreplace = 0;
}

static inline char *dcache_addr(long entry)
{
	return dcache_buf + F2FS_BLKSIZE * entry;
}

/* relocate on (n+1)-th collision */
static inline long dcache_relocate(long entry, int n)
{
	assert(dcache_config.num_cache_entry != 0);
	return (entry + dcache_relocate_offset[n]) %
			dcache_config.num_cache_entry;
}

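/*
 * Find the cache slot for device block 'blk': return the matching or first
 * empty slot along the probe sequence, or the least recently used slot once
 * max_hash_collision probes have been tried.
 */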
static long dcache_find(off64_t blk)
{
	register long n = dcache_config.num_cache_entry;
	register unsigned m = dcache_config.max_hash_collision;
	long entry, least_used, target;
	unsigned try;

	assert(n > 0);
	target = least_used = entry = blk % n; /* simple modulo hash */

	for (try = 0; try < m; try++) {
		if (!dcache_valid[target] || dcache_blk[target] == blk)
			return target;  /* found target or empty cache slot */
		if (dcache_lastused[target] < dcache_lastused[least_used])
			least_used = target;
		target = dcache_relocate(entry, try); /* next target */
	}
	return least_used;  /* max search reached, return least used slot */
}

/* Physical read into cache */
static int dcache_io_read(int fd, long entry, off64_t offset, off64_t blk)
{
	if (pread64(fd, dcache_buf + entry * F2FS_BLKSIZE,
			F2FS_BLKSIZE, offset) < 0) {
		MSG(0, "\n read() fail.\n");
		return -1;
	}
	dcache_lastused[entry] = ++dcache_usetick;
	dcache_valid[entry] = true;
	dcache_blk[entry] = blk;
	return 0;
}

/*
 * - Note: read and write are not symmetric:
 *      For reads, we go block by block, due to the nature of the cache:
 *      some blocks may be cached while others are not.
 *      For writes, since we always write through, all writes can be joined
 *      into one and issued once by the caller.  This function updates the
 *      cache for a write, but does not do the physical write; the caller is
 *      responsible for that.
 * - Note: read and write are handled together because they share the same
 *      logic for finding the relevant cache entries.
 * - Return values:
 *      0: success
 *      1: cache not available (uninitialized)
 *     -1: error
 */
static int dcache_update_rw(int fd, void *buf, off64_t offset,
		size_t byte_count, bool is_write)
{
	off64_t blk;
	int addr_in_blk;
	off64_t start;

	if (!dcache_initialized)
		dcache_init(); /* auto initialize */

	if (!dcache_initialized)
		return 1; /* not available */

	blk = offset / F2FS_BLKSIZE;
	addr_in_blk = offset % F2FS_BLKSIZE;
	start = blk * F2FS_BLKSIZE;

	while (byte_count != 0) {
		size_t cur_size = min(byte_count,
				(size_t)(F2FS_BLKSIZE - addr_in_blk));
		long entry = dcache_find(blk);

		if (!is_write)
			++dcache_raccess;

		if (dcache_valid[entry] && dcache_blk[entry] == blk) {
			/* cache hit */
			if (is_write) /* write: update cache */
				memcpy(dcache_addr(entry) + addr_in_blk,
					buf, cur_size);
			else
				++dcache_rhit;
		} else {
			/* cache miss */
			if (!is_write) {
				int err;

				++dcache_rmiss;
				if (dcache_valid[entry])
					++dcache_rreplace;
				/* read: physical I/O read into cache */
				err = dcache_io_read(fd, entry, start, blk);
				if (err)
					return err;
			}
		}

		/* read: copy data from cache */
		/* write: nothing to do, since we don't do the physical write */
		if (!is_write)
			memcpy(buf, dcache_addr(entry) + addr_in_blk,
				cur_size);

		/* next block */
		++blk;
		buf += cur_size;
		start += F2FS_BLKSIZE;
		byte_count -= cur_size;
		addr_in_blk = 0;
	}
	return 0;
}

/*
 * dcache_update_cache() only updates the cache; it does no physical I/O.
 * Thus even on success, the caller still needs a normal non-cache write for
 * the actual data.
 *
 * return value: 1: cache not available
 *               0: success, -1: I/O error
 */
int dcache_update_cache(int fd, void *buf, off64_t offset, size_t count)
{
	return dcache_update_rw(fd, buf, offset, count, true);
}

/* handles read into cache + read into buffer */
int dcache_read(int fd, void *buf, off64_t offset, size_t count)
{
	return dcache_update_rw(fd, buf, offset, count, false);
}

/*
 * IO interfaces
 */
int dev_read_version(void *buf, __u64 offset, size_t len)
{
	if (c.sparse_mode)
		return 0;
	if (pread64(c.kd, buf, len, offset) < 0)
		return -1;
	return 0;
}

#ifdef HAVE_SPARSE_SPARSE_H
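/*
 * In sparse mode the image never hits a block device directly: each
 * F2FS_BLKSIZE block lives in the in-memory 'blocks[]' table (NULL = hole,
 * zeroed_block = explicit zero fill) until f2fs_finalize_device() emits it
 * through libsparse.
 */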
static int sparse_read_blk(__u64 block, int count, void *buf)
{
	int i;
	char *out = buf;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block])
			memcpy(out + (i * F2FS_BLKSIZE),
					blocks[cur_block], F2FS_BLKSIZE);
		else if (blocks)
			memset(out + (i * F2FS_BLKSIZE), 0, F2FS_BLKSIZE);
	}
	return 0;
}

static int sparse_write_blk(__u64 block, int count, const void *buf)
{
	int i;
	__u64 cur_block;
	const char *in = buf;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block] == zeroed_block)
			blocks[cur_block] = NULL;
		if (!blocks[cur_block]) {
			blocks[cur_block] = calloc(1, F2FS_BLKSIZE);
			if (!blocks[cur_block])
				return -ENOMEM;
		}
		memcpy(blocks[cur_block], in + (i * F2FS_BLKSIZE),
				F2FS_BLKSIZE);
	}
	return 0;
}

static int sparse_write_zeroed_blk(__u64 block, int count)
{
	int i;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block])
			continue;
		blocks[cur_block] = zeroed_block;
	}
	return 0;
}

#ifdef SPARSE_CALLBACK_USES_SIZE_T
static int sparse_import_segment(void *UNUSED(priv), const void *data,
		size_t len, unsigned int block, unsigned int nr_blocks)
#else
static int sparse_import_segment(void *UNUSED(priv), const void *data, int len,
		unsigned int block, unsigned int nr_blocks)
#endif
{
	/* Ignore chunk headers, only write the data */
	if (!nr_blocks || len % F2FS_BLKSIZE)
		return 0;

	return sparse_write_blk(block, nr_blocks, data);
}

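/*
 * Emit one run of consecutive blocks to libsparse: either a fill chunk of
 * zeroes, or a single data chunk merged from the per-block buffers.
 */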
static int sparse_merge_blocks(uint64_t start, uint64_t num, int zero)
{
	char *buf;
	uint64_t i;

	if (zero) {
		blocks[start] = NULL;
		return sparse_file_add_fill(f2fs_sparse_file, 0x0,
					F2FS_BLKSIZE * num, start);
	}

	buf = calloc(num, F2FS_BLKSIZE);
	if (!buf) {
		fprintf(stderr, "failed to alloc %llu\n",
			(unsigned long long)num * F2FS_BLKSIZE);
		return -ENOMEM;
	}

	for (i = 0; i < num; i++) {
		memcpy(buf + i * F2FS_BLKSIZE, blocks[start + i], F2FS_BLKSIZE);
		free(blocks[start + i]);
		blocks[start + i] = NULL;
	}

	/* free_sparse_blocks will release this buf. */
	blocks[start] = buf;

	return sparse_file_add_data(f2fs_sparse_file, blocks[start],
				F2FS_BLKSIZE * num, start);
}
#else
static int sparse_read_blk(__u64 UNUSED(block),
			int UNUSED(count), void *UNUSED(buf))
{
	return 0;
}

static int sparse_write_blk(__u64 UNUSED(block),
			int UNUSED(count), const void *UNUSED(buf))
{
	return 0;
}

static int sparse_write_zeroed_blk(__u64 UNUSED(block), int UNUSED(count))
{
	return 0;
}
#endif

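/*
 * Read 'len' bytes at image offset 'offset'.  Sparse mode is served from the
 * in-memory block table; otherwise the user-space cache is tried first and
 * pread64() is only used when the cache is unavailable.
 */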
int dev_read(void *buf, __u64 offset, size_t len)
{
	int fd;
	int err;

	if (c.sparse_mode)
		return sparse_read_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/* err = 1: cache not available, fall back to non-cache R/W */
	/* err = 0: success, err = -1: I/O error */
	err = dcache_read(fd, buf, (off64_t)offset, len);
	if (err <= 0)
		return err;
	if (pread64(fd, buf, len, offset) < 0)
		return -1;
	return 0;
}

#ifdef POSIX_FADV_WILLNEED
int dev_readahead(__u64 offset, size_t len)
#else
int dev_readahead(__u64 offset, size_t UNUSED(len))
#endif
{
	int fd = __get_device_fd(&offset);

	if (fd < 0)
		return fd;
#ifdef POSIX_FADV_WILLNEED
	return posix_fadvise(fd, offset, len, POSIX_FADV_WILLNEED);
#else
	return 0;
#endif
}

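/*
 * Write 'len' bytes at image offset 'offset'.  In sparse mode the data goes
 * to the block table; otherwise the cached copy (if any) is refreshed and
 * the data is always written through to the device.
 */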
int dev_write(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.dry_run)
		return 0;

	if (c.sparse_mode)
		return sparse_write_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/*
	 * dcache_update_cache() only updates the cache and does no I/O, so
	 * even on success we still need a normal non-cache write for the
	 * actual data.
	 */
	if (dcache_update_cache(fd, buf, (off64_t)offset, len) < 0)
		return -1;
	if (pwrite64(fd, buf, len, offset) < 0)
		return -1;
	return 0;
}

int dev_write_block(void *buf, __u64 blk_addr)
{
	return dev_write(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_write_dump(void *buf, __u64 offset, size_t len)
{
	if (pwrite64(c.dump_fd, buf, len, offset) < 0)
		return -1;
	return 0;
}

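/*
 * Fill a region with zeroes.  In sparse mode the blocks are simply pointed
 * at the shared zeroed_block; otherwise the caller-supplied buffer (which
 * must itself be zeroed) is written out directly.
 */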
int dev_fill(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.sparse_mode)
		return sparse_write_zeroed_blk(offset / F2FS_BLKSIZE,
						len / F2FS_BLKSIZE);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/* Only allow fill to zero */
	if (*((__u8 *)buf))
		return -1;
	if (pwrite64(fd, buf, len, offset) < 0)
		return -1;
	return 0;
}

int dev_fill_block(void *buf, __u64 blk_addr)
{
	return dev_fill(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_read_block(void *buf, __u64 blk_addr)
{
	return dev_read(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_reada_block(__u64 blk_addr)
{
	return dev_readahead(blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int f2fs_fsync_device(void)
{
#ifdef HAVE_FSYNC
	int i;

	for (i = 0; i < c.ndevs; i++) {
		if (fsync(c.devices[i].fd) < 0) {
			MSG(0, "\tError: Could not conduct fsync!!!\n");
			return -1;
		}
	}
#endif
	return 0;
}

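/*
 * Prepare sparse-mode state: create a new sparse file (mkfs) or import an
 * existing one (other tools), then build the in-memory block table used by
 * the sparse_*_blk() helpers above.
 */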
int f2fs_init_sparse_file(void)
{
#ifdef HAVE_SPARSE_SPARSE_H
	if (c.func == MKFS) {
		f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE, c.device_size);
		if (!f2fs_sparse_file)
			return -1;
	} else {
		f2fs_sparse_file = sparse_file_import(c.devices[0].fd,
							true, false);
		if (!f2fs_sparse_file)
			return -1;

		c.device_size = sparse_file_len(f2fs_sparse_file, 0, 0);
		c.device_size &= (~((uint64_t)(F2FS_BLKSIZE - 1)));
	}

	if (sparse_file_block_size(f2fs_sparse_file) != F2FS_BLKSIZE) {
		MSG(0, "\tError: Corrupted sparse file\n");
		return -1;
	}
	blocks_count = c.device_size / F2FS_BLKSIZE;
	blocks = calloc(blocks_count, sizeof(char *));
	if (!blocks) {
		MSG(0, "\tError: Calloc Failed for blocks!!!\n");
		return -1;
	}

	zeroed_block = calloc(1, F2FS_BLKSIZE);
	if (!zeroed_block) {
		MSG(0, "\tError: Calloc Failed for zeroed block!!!\n");
		return -1;
	}

	return sparse_file_foreach_chunk(f2fs_sparse_file, true, false,
				sparse_import_segment, NULL);
#else
	MSG(0, "\tError: Sparse mode is only supported for android\n");
	return -1;
#endif
}

void f2fs_release_sparse_resource(void)
{
#ifdef HAVE_SPARSE_SPARSE_H
	int j;

	if (c.sparse_mode) {
		if (f2fs_sparse_file != NULL) {
			sparse_file_destroy(f2fs_sparse_file);
			f2fs_sparse_file = NULL;
		}
		for (j = 0; j < blocks_count; j++)
			free(blocks[j]);
		free(blocks);
		blocks = NULL;
		free(zeroed_block);
		zeroed_block = NULL;
	}
#endif
}

#define MAX_CHUNK_SIZE		(1 * 1024 * 1024 * 1024ULL)
#define MAX_CHUNK_COUNT		(MAX_CHUNK_SIZE / F2FS_BLKSIZE)
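/*
 * Flush the sparse block table (if any) to the output as fill/data chunks,
 * then fsync and close every device.
 */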
int f2fs_finalize_device(void)
{
	int i;
	int ret = 0;

#ifdef HAVE_SPARSE_SPARSE_H
	if (c.sparse_mode) {
		int64_t chunk_start = (blocks[0] == NULL) ? -1 : 0;
		uint64_t j;

		if (c.func != MKFS) {
			sparse_file_destroy(f2fs_sparse_file);
			ret = ftruncate(c.devices[0].fd, 0);
			ASSERT(!ret);
			lseek(c.devices[0].fd, 0, SEEK_SET);
			f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE,
							c.device_size);
		}

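		/*
		 * Walk the block table and flush it as chunks: zeroed_block
		 * entries become fill chunks, while runs of data blocks are
		 * merged into single data chunks capped at MAX_CHUNK_COUNT.
		 */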
		for (j = 0; j < blocks_count; ++j) {
			if (chunk_start != -1) {
				if (j - chunk_start >= MAX_CHUNK_COUNT) {
					ret = sparse_merge_blocks(chunk_start,
							j - chunk_start, 0);
					ASSERT(!ret);
					chunk_start = -1;
				}
			}

			if (chunk_start == -1) {
				if (!blocks[j])
					continue;

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				} else {
					chunk_start = j;
				}
			} else {
				if (blocks[j] && blocks[j] != zeroed_block)
					continue;

				ret = sparse_merge_blocks(chunk_start,
						j - chunk_start, 0);
				ASSERT(!ret);

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				}

				chunk_start = -1;
			}
		}
		if (chunk_start != -1) {
			ret = sparse_merge_blocks(chunk_start,
					blocks_count - chunk_start, 0);
			ASSERT(!ret);
		}

		sparse_file_write(f2fs_sparse_file, c.devices[0].fd,
				/*gzip*/0, /*sparse*/1, /*crc*/0);

		f2fs_release_sparse_resource();
	}
#endif
	/*
	 * We should call fsync() to flush out all the dirty pages
	 * in the block device page cache.
	 */
	for (i = 0; i < c.ndevs; i++) {
#ifdef HAVE_FSYNC
		ret = fsync(c.devices[i].fd);
		if (ret < 0) {
			MSG(0, "\tError: Could not conduct fsync!!!\n");
			break;
		}
#endif
		ret = close(c.devices[i].fd);
		if (ret < 0) {
			MSG(0, "\tError: Failed to close device file!!!\n");
			break;
		}
		free(c.devices[i].path);
		free(c.devices[i].zone_cap_blocks);
	}
	close(c.kd);

	return ret;
}