f2fs-tools: support small RO partition

This patch adds a readonly feature for small partitions by eliminating
the on-disk layout used for data writes, such as the provisioning/reserved
space and the SSA area.

This requires f2fs updates to understand RO feature in superblock.

Tested 4 cases:
 sload.f2fs -c -a lzo -f kernel/fs $DEV
 sload.f2fs -c -a lz4 -f kernel/fs $DEV
 sload.f2fs -c -r -a lz4 -f kernel/fs $DEV
 sload.f2fs -c -L 3 -r -a lz4 -f kernel/fs $DEV

after:
 mkfs.f2fs -O ro,compression,extra_attr -f $DEV
 fsck.f2fs $DEV

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
This commit is contained in:
Jaegeuk Kim 2020-06-30 14:03:51 -07:00
parent a9594c6f56
commit 1d2683f551
8 changed files with 167 additions and 52 deletions

View File

@ -144,6 +144,7 @@ static int find_and_dec_hard_link_list(struct f2fs_sb_info *sbi, u32 nid)
static int is_valid_ssa_node_blk(struct f2fs_sb_info *sbi, u32 nid, static int is_valid_ssa_node_blk(struct f2fs_sb_info *sbi, u32 nid,
u32 blk_addr) u32 blk_addr)
{ {
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
struct f2fs_summary_block *sum_blk; struct f2fs_summary_block *sum_blk;
struct f2fs_summary *sum_entry; struct f2fs_summary *sum_entry;
struct seg_entry * se; struct seg_entry * se;
@ -151,6 +152,9 @@ static int is_valid_ssa_node_blk(struct f2fs_sb_info *sbi, u32 nid,
int need_fix = 0, ret = 0; int need_fix = 0, ret = 0;
int type; int type;
if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))
return 0;
segno = GET_SEGNO(sbi, blk_addr); segno = GET_SEGNO(sbi, blk_addr);
offset = OFFSET_IN_SEG(sbi, blk_addr); offset = OFFSET_IN_SEG(sbi, blk_addr);
@ -261,6 +265,7 @@ out:
static int is_valid_ssa_data_blk(struct f2fs_sb_info *sbi, u32 blk_addr, static int is_valid_ssa_data_blk(struct f2fs_sb_info *sbi, u32 blk_addr,
u32 parent_nid, u16 idx_in_node, u8 version) u32 parent_nid, u16 idx_in_node, u8 version)
{ {
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
struct f2fs_summary_block *sum_blk; struct f2fs_summary_block *sum_blk;
struct f2fs_summary *sum_entry; struct f2fs_summary *sum_entry;
struct seg_entry * se; struct seg_entry * se;
@ -268,6 +273,9 @@ static int is_valid_ssa_data_blk(struct f2fs_sb_info *sbi, u32 blk_addr,
int need_fix = 0, ret = 0; int need_fix = 0, ret = 0;
int type; int type;
if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))
return 0;
segno = GET_SEGNO(sbi, blk_addr); segno = GET_SEGNO(sbi, blk_addr);
offset = OFFSET_IN_SEG(sbi, blk_addr); offset = OFFSET_IN_SEG(sbi, blk_addr);
@ -2372,10 +2380,15 @@ static int check_curseg_write_pointer(struct f2fs_sb_info *UNUSED(sbi),
int check_curseg_offset(struct f2fs_sb_info *sbi, int type) int check_curseg_offset(struct f2fs_sb_info *sbi, int type)
{ {
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, type); struct curseg_info *curseg = CURSEG_I(sbi, type);
struct seg_entry *se; struct seg_entry *se;
int j, nblocks; int j, nblocks;
if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO) &&
type != CURSEG_HOT_DATA && type != CURSEG_HOT_NODE)
return 0;
if ((curseg->next_blkoff >> 3) >= SIT_VBLOCK_MAP_SIZE) { if ((curseg->next_blkoff >> 3) >= SIT_VBLOCK_MAP_SIZE) {
ASSERT_MSG("Next block offset:%u is invalid, type:%d", ASSERT_MSG("Next block offset:%u is invalid, type:%d",
curseg->next_blkoff, type); curseg->next_blkoff, type);
@ -2958,6 +2971,7 @@ void fsck_chk_and_fix_write_pointers(struct f2fs_sb_info *sbi)
int fsck_chk_curseg_info(struct f2fs_sb_info *sbi) int fsck_chk_curseg_info(struct f2fs_sb_info *sbi)
{ {
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
struct curseg_info *curseg; struct curseg_info *curseg;
struct seg_entry *se; struct seg_entry *se;
struct f2fs_summary_block *sum_blk; struct f2fs_summary_block *sum_blk;
@ -2968,6 +2982,10 @@ int fsck_chk_curseg_info(struct f2fs_sb_info *sbi)
se = get_seg_entry(sbi, curseg->segno); se = get_seg_entry(sbi, curseg->segno);
sum_blk = curseg->sum_blk; sum_blk = curseg->sum_blk;
if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
(i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE))
continue;
if (se->type != i) { if (se->type != i) {
ASSERT_MSG("Incorrect curseg [%d]: segno [0x%x] " ASSERT_MSG("Incorrect curseg [%d]: segno [0x%x] "
"type(SIT) [%d]", i, curseg->segno, "type(SIT) [%d]", i, curseg->segno,
@ -3050,7 +3068,10 @@ int fsck_verify(struct f2fs_sb_info *sbi)
} }
c.bug_on = 1; c.bug_on = 1;
} }
printf("[FSCK] Max image size: %"PRIu64" MB, Free space: %u MB\n",
c.max_size >> 20,
(sbi->user_block_count - sbi->total_valid_block_count) >>
(20 - F2FS_BLKSIZE_BITS));
printf("[FSCK] Unreachable nat entries "); printf("[FSCK] Unreachable nat entries ");
if (nr_unref_nid == 0x0) { if (nr_unref_nid == 0x0) {
printf(" [Ok..] [0x%x]\n", nr_unref_nid); printf(" [Ok..] [0x%x]\n", nr_unref_nid);

View File

@ -915,6 +915,11 @@ static int do_defrag(struct f2fs_sb_info *sbi)
{ {
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi); struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) {
MSG(0, "Not support on readonly image.\n");
return -1;
}
if (c.defrag_start > get_sb(block_count)) if (c.defrag_start > get_sb(block_count))
goto out_range; goto out_range;
if (c.defrag_start < SM_I(sbi)->main_blkaddr) if (c.defrag_start < SM_I(sbi)->main_blkaddr)

View File

@ -568,6 +568,9 @@ void print_sb_state(struct f2fs_super_block *sb)
if (f & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) { if (f & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
MSG(0, "%s", " compression"); MSG(0, "%s", " compression");
} }
if (f & cpu_to_le32(F2FS_FEATURE_RO)) {
MSG(0, "%s", " ro");
}
MSG(0, "\n"); MSG(0, "\n");
MSG(0, "Info: superblock encrypt level = %d, salt = ", MSG(0, "Info: superblock encrypt level = %d, salt = ",
sb->encryption_level); sb->encryption_level);
@ -863,9 +866,10 @@ int sanity_check_raw_super(struct f2fs_super_block *sb, enum SB_ADDR sb_addr)
return -1; return -1;
} }
if (total_sections > segment_count || if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
(total_sections > segment_count ||
total_sections < F2FS_MIN_SEGMENTS || total_sections < F2FS_MIN_SEGMENTS ||
segs_per_sec > segment_count || !segs_per_sec) { segs_per_sec > segment_count || !segs_per_sec)) {
MSG(0, "\tInvalid segment/section count (%u, %u x %u)\n", MSG(0, "\tInvalid segment/section count (%u, %u x %u)\n",
segment_count, total_sections, segs_per_sec); segment_count, total_sections, segs_per_sec);
return 1; return 1;
@ -1262,14 +1266,16 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
ovp_segments = get_cp(overprov_segment_count); ovp_segments = get_cp(overprov_segment_count);
reserved_segments = get_cp(rsvd_segment_count); reserved_segments = get_cp(rsvd_segment_count);
if (fsmeta < F2FS_MIN_SEGMENT || ovp_segments == 0 || if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
reserved_segments == 0) { (fsmeta < F2FS_MIN_SEGMENT || ovp_segments == 0 ||
reserved_segments == 0)) {
MSG(0, "\tWrong layout: check mkfs.f2fs version\n"); MSG(0, "\tWrong layout: check mkfs.f2fs version\n");
return 1; return 1;
} }
user_block_count = get_cp(user_block_count); user_block_count = get_cp(user_block_count);
segment_count_main = get_sb(segment_count_main); segment_count_main = get_sb(segment_count_main) +
(cpu_to_le32(F2FS_FEATURE_RO) ? 1 : 0);
log_blocks_per_seg = get_sb(log_blocks_per_seg); log_blocks_per_seg = get_sb(log_blocks_per_seg);
if (!user_block_count || user_block_count >= if (!user_block_count || user_block_count >=
segment_count_main << log_blocks_per_seg) { segment_count_main << log_blocks_per_seg) {
@ -1892,11 +1898,15 @@ static void read_normal_summaries(struct f2fs_sb_info *sbi, int type)
void update_sum_entry(struct f2fs_sb_info *sbi, block_t blk_addr, void update_sum_entry(struct f2fs_sb_info *sbi, block_t blk_addr,
struct f2fs_summary *sum) struct f2fs_summary *sum)
{ {
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
struct f2fs_summary_block *sum_blk; struct f2fs_summary_block *sum_blk;
u32 segno, offset; u32 segno, offset;
int type, ret; int type, ret;
struct seg_entry *se; struct seg_entry *se;
if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))
return;
segno = GET_SEGNO(sbi, blk_addr); segno = GET_SEGNO(sbi, blk_addr);
offset = OFFSET_IN_SEG(sbi, blk_addr); offset = OFFSET_IN_SEG(sbi, blk_addr);
@ -2731,18 +2741,17 @@ int find_next_free_block(struct f2fs_sb_info *sbi, u64 *to, int left,
bitmap = get_seg_bitmap(sbi, se); bitmap = get_seg_bitmap(sbi, se);
type = get_seg_type(sbi, se); type = get_seg_type(sbi, se);
if (vblocks == sbi->blocks_per_seg || if (vblocks == sbi->blocks_per_seg) {
IS_CUR_SEGNO(sbi, segno)) { next_segment:
*to = left ? START_BLOCK(sbi, segno) - 1:
START_BLOCK(sbi, segno + 1);
continue;
}
if (vblocks == 0 && not_enough) {
*to = left ? START_BLOCK(sbi, segno) - 1: *to = left ? START_BLOCK(sbi, segno) - 1:
START_BLOCK(sbi, segno + 1); START_BLOCK(sbi, segno + 1);
continue; continue;
} }
if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
IS_CUR_SEGNO(sbi, segno))
goto next_segment;
if (vblocks == 0 && not_enough)
goto next_segment;
if (vblocks == 0 && !(segno % sbi->segs_per_sec)) { if (vblocks == 0 && !(segno % sbi->segs_per_sec)) {
struct seg_entry *se2; struct seg_entry *se2;
@ -2773,17 +2782,24 @@ int find_next_free_block(struct f2fs_sb_info *sbi, u64 *to, int left,
static void move_one_curseg_info(struct f2fs_sb_info *sbi, u64 from, int left, static void move_one_curseg_info(struct f2fs_sb_info *sbi, u64 from, int left,
int i) int i)
{ {
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, i); struct curseg_info *curseg = CURSEG_I(sbi, i);
struct f2fs_summary_block buf; struct f2fs_summary_block buf;
u32 old_segno; u32 old_segno;
u64 ssa_blk, to; u64 ssa_blk, to;
int ret; int ret;
if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))) {
if (i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
return;
goto bypass_ssa;
}
/* update original SSA too */ /* update original SSA too */
ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno); ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
ret = dev_write_block(curseg->sum_blk, ssa_blk); ret = dev_write_block(curseg->sum_blk, ssa_blk);
ASSERT(ret >= 0); ASSERT(ret >= 0);
bypass_ssa:
to = from; to = from;
ret = find_next_free_block(sbi, &to, left, i, ret = find_next_free_block(sbi, &to, left, i,
c.zoned_model == F2FS_ZONED_HM); c.zoned_model == F2FS_ZONED_HM);
@ -3022,10 +3038,12 @@ void write_checkpoint(struct f2fs_sb_info *sbi)
ret = dev_write_block(curseg->sum_blk, cp_blk_no++); ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
ASSERT(ret >= 0); ASSERT(ret >= 0);
/* update original SSA too */ if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))) {
ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno); /* update original SSA too */
ret = dev_write_block(curseg->sum_blk, ssa_blk); ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
ASSERT(ret >= 0); ret = dev_write_block(curseg->sum_blk, ssa_blk);
ASSERT(ret >= 0);
}
} }
/* Write nat bits */ /* Write nat bits */

View File

@ -61,6 +61,7 @@ void set_data_blkaddr(struct dnode_of_data *dn)
block_t new_node_block(struct f2fs_sb_info *sbi, block_t new_node_block(struct f2fs_sb_info *sbi,
struct dnode_of_data *dn, unsigned int ofs) struct dnode_of_data *dn, unsigned int ofs)
{ {
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
struct f2fs_node *f2fs_inode; struct f2fs_node *f2fs_inode;
struct f2fs_node *node_blk; struct f2fs_node *node_blk;
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@ -89,6 +90,10 @@ block_t new_node_block(struct f2fs_sb_info *sbi,
type = CURSEG_WARM_NODE; type = CURSEG_WARM_NODE;
} }
if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
type != CURSEG_HOT_NODE)
type = CURSEG_HOT_NODE;
get_node_info(sbi, dn->nid, &ni); get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, 0, ni.version); set_summary(&sum, dn->nid, 0, ni.version);
ret = reserve_new_block(sbi, &blkaddr, &sum, type, !ofs); ret = reserve_new_block(sbi, &blkaddr, &sum, type, !ofs);

View File

@ -104,11 +104,16 @@ int reserve_new_block(struct f2fs_sb_info *sbi, block_t *to,
int new_data_block(struct f2fs_sb_info *sbi, void *block, int new_data_block(struct f2fs_sb_info *sbi, void *block,
struct dnode_of_data *dn, int type) struct dnode_of_data *dn, int type)
{ {
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
struct f2fs_summary sum; struct f2fs_summary sum;
struct node_info ni; struct node_info ni;
unsigned int blkaddr = datablock_addr(dn->node_blk, dn->ofs_in_node); unsigned int blkaddr = datablock_addr(dn->node_blk, dn->ofs_in_node);
int ret; int ret;
if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
type != CURSEG_HOT_DATA)
type = CURSEG_HOT_DATA;
ASSERT(dn->node_blk); ASSERT(dn->node_blk);
memset(block, 0, BLOCK_SZ); memset(block, 0, BLOCK_SZ);

View File

@ -458,6 +458,7 @@ struct f2fs_configuration {
u_int64_t wanted_total_sectors; u_int64_t wanted_total_sectors;
u_int64_t wanted_sector_size; u_int64_t wanted_sector_size;
u_int64_t target_sectors; u_int64_t target_sectors;
u_int64_t max_size;
u_int32_t sectors_per_blk; u_int32_t sectors_per_blk;
u_int32_t blks_per_seg; u_int32_t blks_per_seg;
__u8 init_version[VERSION_LEN + 1]; __u8 init_version[VERSION_LEN + 1];
@ -716,7 +717,8 @@ enum {
#define F2FS_FEATURE_VERITY 0x0400 /* reserved */ #define F2FS_FEATURE_VERITY 0x0400 /* reserved */
#define F2FS_FEATURE_SB_CHKSUM 0x0800 #define F2FS_FEATURE_SB_CHKSUM 0x0800
#define F2FS_FEATURE_CASEFOLD 0x1000 #define F2FS_FEATURE_CASEFOLD 0x1000
#define F2FS_FEATURE_COMPRESSION 0x2000 #define F2FS_FEATURE_COMPRESSION 0x2000
#define F2FS_FEATURE_RO 0x4000
#define MAX_VOLUME_NAME 512 #define MAX_VOLUME_NAME 512
@ -1566,6 +1568,7 @@ struct feature feature_table[] = { \
{ "sb_checksum", F2FS_FEATURE_SB_CHKSUM }, \ { "sb_checksum", F2FS_FEATURE_SB_CHKSUM }, \
{ "casefold", F2FS_FEATURE_CASEFOLD }, \ { "casefold", F2FS_FEATURE_CASEFOLD }, \
{ "compression", F2FS_FEATURE_COMPRESSION }, \ { "compression", F2FS_FEATURE_COMPRESSION }, \
{ "ro", F2FS_FEATURE_RO}, \
{ NULL, 0x0}, \ { NULL, 0x0}, \
}; };

View File

@ -507,6 +507,9 @@ int dev_read(void *buf, __u64 offset, size_t len)
int fd; int fd;
int err; int err;
if (c.max_size < (offset + len))
c.max_size = offset + len;
if (c.sparse_mode) if (c.sparse_mode)
return sparse_read_blk(offset / F2FS_BLKSIZE, return sparse_read_blk(offset / F2FS_BLKSIZE,
len / F2FS_BLKSIZE, buf); len / F2FS_BLKSIZE, buf);
@ -548,6 +551,9 @@ int dev_write(void *buf, __u64 offset, size_t len)
{ {
int fd; int fd;
if (c.max_size < (offset + len))
c.max_size = offset + len;
if (c.dry_run) if (c.dry_run)
return 0; return 0;
@ -590,6 +596,9 @@ int dev_fill(void *buf, __u64 offset, size_t len)
{ {
int fd; int fd;
if (c.max_size < (offset + len))
c.max_size = offset + len;
if (c.sparse_mode) if (c.sparse_mode)
return sparse_write_zeroed_blk(offset / F2FS_BLKSIZE, return sparse_write_zeroed_blk(offset / F2FS_BLKSIZE,
len / F2FS_BLKSIZE); len / F2FS_BLKSIZE);

View File

@ -212,7 +212,7 @@ static int f2fs_prepare_super_block(void)
u_int64_t total_meta_zones, total_meta_segments; u_int64_t total_meta_zones, total_meta_segments;
u_int32_t sit_bitmap_size, max_sit_bitmap_size; u_int32_t sit_bitmap_size, max_sit_bitmap_size;
u_int32_t max_nat_bitmap_size, max_nat_segments; u_int32_t max_nat_bitmap_size, max_nat_segments;
u_int32_t total_zones; u_int32_t total_zones, avail_zones;
enum quota_type qtype; enum quota_type qtype;
int i; int i;
@ -250,6 +250,9 @@ static int f2fs_prepare_super_block(void)
zone_size_bytes * zone_size_bytes - zone_size_bytes * zone_size_bytes -
(u_int64_t) c.start_sector * DEFAULT_SECTOR_SIZE; (u_int64_t) c.start_sector * DEFAULT_SECTOR_SIZE;
if (c.feature & cpu_to_le32(F2FS_FEATURE_RO))
zone_align_start_offset = 8192;
if (c.start_sector % DEFAULT_SECTORS_PER_BLOCK) { if (c.start_sector % DEFAULT_SECTORS_PER_BLOCK) {
MSG(1, "\t%s: Align start sector number to the page unit\n", MSG(1, "\t%s: Align start sector number to the page unit\n",
c.zoned_mode ? "FAIL" : "WARN"); c.zoned_mode ? "FAIL" : "WARN");
@ -400,7 +403,10 @@ static int f2fs_prepare_super_block(void)
get_sb(segment_count_nat))) * get_sb(segment_count_nat))) *
c.blks_per_seg; c.blks_per_seg;
blocks_for_ssa = total_valid_blks_available / if (c.feature & cpu_to_le32(F2FS_FEATURE_RO))
blocks_for_ssa = 0;
else
blocks_for_ssa = total_valid_blks_available /
c.blks_per_seg + 1; c.blks_per_seg + 1;
set_sb(segment_count_ssa, SEG_ALIGN(blocks_for_ssa)); set_sb(segment_count_ssa, SEG_ALIGN(blocks_for_ssa));
@ -457,7 +463,13 @@ static int f2fs_prepare_super_block(void)
(2 * (100 / c.overprovision + 1) + NR_CURSEG_TYPE) * (2 * (100 / c.overprovision + 1) + NR_CURSEG_TYPE) *
round_up(f2fs_get_usable_segments(sb), get_sb(section_count)); round_up(f2fs_get_usable_segments(sb), get_sb(section_count));
if (c.overprovision == 0 || c.total_segments < F2FS_MIN_SEGMENTS || if (c.feature & cpu_to_le32(F2FS_FEATURE_RO)) {
c.overprovision = 0;
c.reserved_segments = 0;
}
if ((!(c.feature & cpu_to_le32(F2FS_FEATURE_RO)) &&
c.overprovision == 0) ||
c.total_segments < F2FS_MIN_SEGMENTS ||
(c.devices[0].total_sectors * (c.devices[0].total_sectors *
c.sector_size < zone_align_start_offset) || c.sector_size < zone_align_start_offset) ||
(get_sb(segment_count_main) - NR_CURSEG_TYPE) < (get_sb(segment_count_main) - NR_CURSEG_TYPE) <
@ -503,13 +515,25 @@ static int f2fs_prepare_super_block(void)
if (c.feature & cpu_to_le32(F2FS_FEATURE_LOST_FOUND)) if (c.feature & cpu_to_le32(F2FS_FEATURE_LOST_FOUND))
c.lpf_ino = c.next_free_nid++; c.lpf_ino = c.next_free_nid++;
if (total_zones <= 6) { if (c.feature & cpu_to_le32(F2FS_FEATURE_RO))
avail_zones = 2;
else
avail_zones = 6;
if (total_zones <= avail_zones) {
MSG(1, "\tError: %d zones: Need more zones " MSG(1, "\tError: %d zones: Need more zones "
"by shrinking zone size\n", total_zones); "by shrinking zone size\n", total_zones);
return -1; return -1;
} }
if (c.heap) { if (c.feature & cpu_to_le32(F2FS_FEATURE_RO)) {
c.cur_seg[CURSEG_HOT_NODE] = 0;
c.cur_seg[CURSEG_WARM_NODE] = 0;
c.cur_seg[CURSEG_COLD_NODE] = 0;
c.cur_seg[CURSEG_HOT_DATA] = 1;
c.cur_seg[CURSEG_COLD_DATA] = 0;
c.cur_seg[CURSEG_WARM_DATA] = 0;
} else if (c.heap) {
c.cur_seg[CURSEG_HOT_NODE] = c.cur_seg[CURSEG_HOT_NODE] =
last_section(last_zone(total_zones)); last_section(last_zone(total_zones));
c.cur_seg[CURSEG_WARM_NODE] = prev_zone(CURSEG_HOT_NODE); c.cur_seg[CURSEG_WARM_NODE] = prev_zone(CURSEG_HOT_NODE);
@ -538,7 +562,8 @@ static int f2fs_prepare_super_block(void)
} }
/* if there is redundancy, reassign it */ /* if there is redundancy, reassign it */
verify_cur_segs(); if (!(c.feature & cpu_to_le32(F2FS_FEATURE_RO)))
verify_cur_segs();
cure_extension_list(); cure_extension_list();
@ -731,9 +756,15 @@ static int f2fs_write_check_point_pack(void)
c.reserved_segments); c.reserved_segments);
/* main segments - reserved segments - (node + data segments) */ /* main segments - reserved segments - (node + data segments) */
set_cp(free_segment_count, f2fs_get_usable_segments(sb) - 6); if (c.feature & cpu_to_le32(F2FS_FEATURE_RO)) {
set_cp(user_block_count, ((get_cp(free_segment_count) + 6 - set_cp(free_segment_count, f2fs_get_usable_segments(sb) - 2);
set_cp(user_block_count, ((get_cp(free_segment_count) + 2 -
get_cp(overprov_segment_count)) * c.blks_per_seg)); get_cp(overprov_segment_count)) * c.blks_per_seg));
} else {
set_cp(free_segment_count, f2fs_get_usable_segments(sb) - 6);
set_cp(user_block_count, ((get_cp(free_segment_count) + 6 -
get_cp(overprov_segment_count)) * c.blks_per_seg));
}
/* cp page (2), data summaries (1), node summaries (3) */ /* cp page (2), data summaries (1), node summaries (3) */
set_cp(cp_pack_total_block_count, 6 + get_sb(cp_payload)); set_cp(cp_pack_total_block_count, 6 + get_sb(cp_payload));
flags = CP_UMOUNT_FLAG | CP_COMPACT_SUM_FLAG; flags = CP_UMOUNT_FLAG | CP_COMPACT_SUM_FLAG;
@ -847,8 +878,13 @@ static int f2fs_write_check_point_pack(void)
sum_compact_p += SUM_JOURNAL_SIZE; sum_compact_p += SUM_JOURNAL_SIZE;
memset(sum, 0, sizeof(struct f2fs_summary_block)); memset(sum, 0, sizeof(struct f2fs_summary_block));
/* inode sit for root */ /* inode sit for root */
journal->n_sits = cpu_to_le16(6); if (c.feature & cpu_to_le32(F2FS_FEATURE_RO))
journal->n_sits = cpu_to_le16(2);
else
journal->n_sits = cpu_to_le16(6);
journal->sit_j.entries[0].segno = cp->cur_node_segno[0]; journal->sit_j.entries[0].segno = cp->cur_node_segno[0];
journal->sit_j.entries[0].se.vblocks = journal->sit_j.entries[0].se.vblocks =
cpu_to_le16((CURSEG_HOT_NODE << 10) | cpu_to_le16((CURSEG_HOT_NODE << 10) |
@ -859,30 +895,43 @@ static int f2fs_write_check_point_pack(void)
if (c.lpf_inum) if (c.lpf_inum)
f2fs_set_bit(i, (char *)journal->sit_j.entries[0].se.valid_map); f2fs_set_bit(i, (char *)journal->sit_j.entries[0].se.valid_map);
journal->sit_j.entries[1].segno = cp->cur_node_segno[1]; if (c.feature & cpu_to_le32(F2FS_FEATURE_RO)) {
journal->sit_j.entries[1].se.vblocks = /* data sit for root */
cpu_to_le16((CURSEG_WARM_NODE << 10)); journal->sit_j.entries[1].segno = cp->cur_data_segno[0];
journal->sit_j.entries[2].segno = cp->cur_node_segno[2]; journal->sit_j.entries[1].se.vblocks =
journal->sit_j.entries[2].se.vblocks = cpu_to_le16((CURSEG_HOT_DATA << 10) |
cpu_to_le16((CURSEG_COLD_NODE << 10)); (1 + c.quota_dnum + c.lpf_dnum));
f2fs_set_bit(0, (char *)journal->sit_j.entries[1].se.valid_map);
for (i = 1; i <= c.quota_dnum; i++)
f2fs_set_bit(i, (char *)journal->sit_j.entries[1].se.valid_map);
if (c.lpf_dnum)
f2fs_set_bit(i, (char *)journal->sit_j.entries[1].se.valid_map);
} else {
journal->sit_j.entries[1].segno = cp->cur_node_segno[1];
journal->sit_j.entries[1].se.vblocks =
cpu_to_le16((CURSEG_WARM_NODE << 10));
journal->sit_j.entries[2].segno = cp->cur_node_segno[2];
journal->sit_j.entries[2].se.vblocks =
cpu_to_le16((CURSEG_COLD_NODE << 10));
/* data sit for root */ /* data sit for root */
journal->sit_j.entries[3].segno = cp->cur_data_segno[0]; journal->sit_j.entries[3].segno = cp->cur_data_segno[0];
journal->sit_j.entries[3].se.vblocks = journal->sit_j.entries[3].se.vblocks =
cpu_to_le16((CURSEG_HOT_DATA << 10) | cpu_to_le16((CURSEG_HOT_DATA << 10) |
(1 + c.quota_dnum + c.lpf_dnum)); (1 + c.quota_dnum + c.lpf_dnum));
f2fs_set_bit(0, (char *)journal->sit_j.entries[3].se.valid_map); f2fs_set_bit(0, (char *)journal->sit_j.entries[3].se.valid_map);
for (i = 1; i <= c.quota_dnum; i++) for (i = 1; i <= c.quota_dnum; i++)
f2fs_set_bit(i, (char *)journal->sit_j.entries[3].se.valid_map); f2fs_set_bit(i, (char *)journal->sit_j.entries[3].se.valid_map);
if (c.lpf_dnum) if (c.lpf_dnum)
f2fs_set_bit(i, (char *)journal->sit_j.entries[3].se.valid_map); f2fs_set_bit(i, (char *)journal->sit_j.entries[3].se.valid_map);
journal->sit_j.entries[4].segno = cp->cur_data_segno[1]; journal->sit_j.entries[4].segno = cp->cur_data_segno[1];
journal->sit_j.entries[4].se.vblocks = journal->sit_j.entries[4].se.vblocks =
cpu_to_le16((CURSEG_WARM_DATA << 10)); cpu_to_le16((CURSEG_WARM_DATA << 10));
journal->sit_j.entries[5].segno = cp->cur_data_segno[2]; journal->sit_j.entries[5].segno = cp->cur_data_segno[2];
journal->sit_j.entries[5].se.vblocks = journal->sit_j.entries[5].se.vblocks =
cpu_to_le16((CURSEG_COLD_DATA << 10)); cpu_to_le16((CURSEG_COLD_DATA << 10));
}
memcpy(sum_compact_p, &journal->n_sits, SUM_JOURNAL_SIZE); memcpy(sum_compact_p, &journal->n_sits, SUM_JOURNAL_SIZE);
sum_compact_p += SUM_JOURNAL_SIZE; sum_compact_p += SUM_JOURNAL_SIZE;
@ -1090,7 +1139,7 @@ static int f2fs_discard_obsolete_dnode(void)
u_int64_t start_inode_pos = get_sb(main_blkaddr); u_int64_t start_inode_pos = get_sb(main_blkaddr);
u_int64_t last_inode_pos; u_int64_t last_inode_pos;
if (c.zoned_mode) if (c.zoned_mode || c.feature & cpu_to_le32(F2FS_FEATURE_RO))
return 0; return 0;
raw_node = calloc(sizeof(struct f2fs_node), 1); raw_node = calloc(sizeof(struct f2fs_node), 1);