linux/fs/logfs/inode.c
Prasad Joshi 0bd90387ed logfs: Propagate page parameter to __logfs_write_inode
During GC, LogFS has to rewrite each valid block into a separate
segment. The rewrite operation reads data from an old segment and
writes it to a newly allocated segment. Since every write changes the
data block pointers maintained in the inode, the inode has to be
rewritten as well.

To avoid an AB-BA deadlock in the GC path, LogFS marks a page with
PG_pre_locked in addition to taking the page lock (PG_locked). The
page lock is ignored iff the page is pre-locked.
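
A simplified sketch of that check (illustration only, not the verbatim
kernel code; the real logfs_lock_write_page() in readwrite.c also bounds
the retry loop and hits BUG() when it gives up, which is the BUG in the
trace below; PagePreLocked() stands here for the test of the
PG_pre_locked bit):

	/* sketch: take the page lock unless its holder pre-locked it for us */
	static void lock_write_page_sketch(struct page *page)
	{
		while (!trylock_page(page)) {
			if (PagePreLocked(page)) {
				/* lock holder is waiting for us; safe to use the page */
				return;
			}
			/* unrelated lock holder; let it finish */
			schedule();
		}
	}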

LogFS uses a special file called the segment file, which maintains an
8-byte entry for every segment. The entry keeps track of each segment's
erase count, level, etc.
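
For illustration, such an 8-byte entry could look like the sketch below
(struct and field names are assumptions made for this example, not taken
from the patch; the point is simply that erase count, level and validity
information are packed into 8 bytes per segment):

	struct segment_entry_sketch {
		__be32	ec_level;	/* erase count and level, packed */
		__be32	valid;		/* valid bytes in this segment */
	};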

Bad things happen when a segment belonging to the segment file is GCed:

 ------------[ cut here ]------------
kernel BUG at /home/prasad/logfs/readwrite.c:297!
invalid opcode: 0000 [#1] SMP
Modules linked in: logfs joydev usbhid hid psmouse e1000 i2c_piix4
		serio_raw [last unloaded: logfs]
Pid: 20161, comm: mount Not tainted 3.1.0-rc3+ #3 innotek GmbH
		VirtualBox
EIP: 0060:[<f809132a>] EFLAGS: 00010292 CPU: 0
EIP is at logfs_lock_write_page+0x6a/0x70 [logfs]
EAX: 00000027 EBX: f73f5b20 ECX: c16007c8 EDX: 00000094
ESI: 00000000 EDI: e59be6e4 EBP: c7337b28 ESP: c7337b18
DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
Process mount (pid: 20161, ti=c7336000 task=eb323f70 task.ti=c7336000)
Stack:
f8099a3d c7337b24 f73f5b20 00001002 c7337b50 f8091f6d f8099a4d f80994e4
00000003 00000000 c7337b68 00000000 c67e4400 00001000 c7337b80 f80935e5
00000000 00000000 00000000 00000000 e1fcf000 0000000f e59be618 c70bf900
Call Trace:
[<f8091f6d>] logfs_get_write_page.clone.16+0xdd/0x100 [logfs]
[<f80935e5>] logfs_mod_segment_entry+0x55/0x110 [logfs]
[<f809460d>] logfs_get_segment_entry+0x1d/0x20 [logfs]
[<f8091060>] ? logfs_cleanup_journal+0x50/0x50 [logfs]
[<f809521b>] ostore_get_erase_count+0x1b/0x40 [logfs]
[<f80965b8>] logfs_open_area+0xc8/0x150 [logfs]
[<c141a7ec>] ? kmemleak_alloc+0x2c/0x60
[<f809668e>] __logfs_segment_write.clone.16+0x4e/0x1b0 [logfs]
[<c10dd563>] ? mempool_kmalloc+0x13/0x20
[<c10dd563>] ? mempool_kmalloc+0x13/0x20
[<f809696f>] logfs_segment_write+0x17f/0x1d0 [logfs]
[<f8092e8c>] logfs_write_i0+0x11c/0x180 [logfs]
[<f8092f35>] logfs_write_direct+0x45/0x90 [logfs]
[<f80934cd>] __logfs_write_buf+0xbd/0xf0 [logfs]
[<c102900e>] ? kmap_atomic_prot+0x4e/0xe0
[<f809424b>] logfs_write_buf+0x3b/0x60 [logfs]
[<f80947a9>] __logfs_write_inode+0xa9/0x110 [logfs]
[<f8094cb0>] logfs_rewrite_block+0xc0/0x110 [logfs]
[<f8095300>] ? get_mapping_page+0x10/0x60 [logfs]
[<f8095aa0>] ? logfs_load_object_aliases+0x2e0/0x2f0 [logfs]
[<f808e57d>] logfs_gc_segment+0x2ad/0x310 [logfs]
[<f808e62a>] __logfs_gc_once+0x4a/0x80 [logfs]
[<f808ed43>] logfs_gc_pass+0x683/0x6a0 [logfs]
[<f8097a89>] logfs_mount+0x5a9/0x680 [logfs]
[<c1126b21>] mount_fs+0x21/0xd0
[<c10f6f6f>] ? __alloc_percpu+0xf/0x20
[<c113da41>] ? alloc_vfsmnt+0xb1/0x130
[<c113db4b>] vfs_kern_mount+0x4b/0xa0
[<c113e06e>] do_kern_mount+0x3e/0xe0
[<c113f60d>] do_mount+0x34d/0x670
[<c10f2749>] ? strndup_user+0x49/0x70
[<c113fcab>] sys_mount+0x6b/0xa0
[<c142d87c>] syscall_call+0x7/0xb
Code: f8 e8 8b 93 39 c9 8b 45 f8 3e 0f ba 28 00 19 d2 85 d2 74 ca eb d0 0f 0b 8d 45 fc 89 44 24 04 c7 04 24 3d 9a 09 f8 e8 09 92 39 c9 <0f> 0b 8d 74 26 00 55 89 e5 3e 8d 74 26 00 8b 10 80 e6 01 74 09
EIP: [<f809132a>] logfs_lock_write_page+0x6a/0x70 [logfs] SS:ESP 0068:c7337b18
---[ end trace 96e67d5b3aa3d6ca ]---

The patch passes the locked page down to __logfs_write_inode, which
calls logfs_get_wblocks() to pre-lock it. This ensures that any further
attempts to lock the page are ignored (especially from
get_erase_count).
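
A minimal sketch of the resulting call shape (illustrative only, not the
actual diff; do_write_inode() stands in for the unchanged write-out
body):

	/* GC path (logfs_rewrite_block), sketch: the already locked page
	 * is handed down instead of NULL */
	ret = __logfs_write_inode(inode, page, flags);

	/* __logfs_write_inode(), sketch: pre-lock the page via
	 * logfs_get_wblocks() before the segment file can be touched */
	static int __logfs_write_inode(struct inode *inode, struct page *page,
			long flags)
	{
		struct super_block *sb = inode->i_sb;
		int ret;

		logfs_get_wblocks(sb, page, flags & WF_LOCK);
		ret = do_write_inode(inode);
		logfs_put_wblocks(sb, page, flags & WF_LOCK);
		return ret;
	}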

Acked-by: Joern Engel <joern@logfs.org>
Signed-off-by: Prasad Joshi <prasadjoshi.linux@gmail.com>
2012-01-28 11:38:25 +05:30


/*
 * fs/logfs/inode.c - inode handling code
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>

/*
 * How soon to reuse old inode numbers? LogFS doesn't store deleted inodes
 * on the medium. It therefore also lacks a method to store the previous
 * generation number for deleted inodes. Instead a single generation number
 * is stored which will be used for new inodes. Being just a 32bit counter,
 * this can obviously wrap relatively quickly. So we only reuse inodes if we
 * know that a fair number of inodes can be created before we have to increment
 * the generation again - effectively adding some bits to the counter.
 * But being too aggressive here means we keep a very large and very sparse
 * inode file, wasting space on indirect blocks.
 * So what is a good value? Beats me. 64k seems moderately bad on both
 * fronts, so let's use that for now...
 *
 * NFS sucks, as everyone already knows.
 */
#define INOS_PER_WRAP		(0x10000)

/*
 * Logfs' requirement to read inodes for garbage collection makes life a bit
 * harder. GC may have to read inodes that are in I_FREEING state, when they
 * are being written out - and waiting for GC to make progress, naturally.
 *
 * So we cannot just call iget() or some variant of it, but first have to check
 * whether the inode in question might be in I_FREEING state. Therefore we
 * maintain our own per-sb list of "almost deleted" inodes and check against
 * that list first. Normally this should be at most 1-2 entries long.
 *
 * Also, inodes have logfs-specific reference counting on top of what the vfs
 * does. When .destroy_inode is called, normally the reference count will drop
 * to zero and the inode gets deleted. But if GC accessed the inode, its
 * refcount will remain nonzero and final deletion will have to wait.
 *
 * As a result we have two sets of functions to get/put inodes:
 * logfs_safe_iget/logfs_safe_iput - safe to call from GC context
 * logfs_iget/iput                 - normal version
 */
static struct kmem_cache *logfs_inode_cache;

static DEFINE_SPINLOCK(logfs_inode_lock);

static void logfs_inode_setops(struct inode *inode)
{
        switch (inode->i_mode & S_IFMT) {
        case S_IFDIR:
                inode->i_op = &logfs_dir_iops;
                inode->i_fop = &logfs_dir_fops;
                inode->i_mapping->a_ops = &logfs_reg_aops;
                break;
        case S_IFREG:
                inode->i_op = &logfs_reg_iops;
                inode->i_fop = &logfs_reg_fops;
                inode->i_mapping->a_ops = &logfs_reg_aops;
                break;
        case S_IFLNK:
                inode->i_op = &logfs_symlink_iops;
                inode->i_mapping->a_ops = &logfs_reg_aops;
                break;
        case S_IFSOCK: /* fall through */
        case S_IFBLK:  /* fall through */
        case S_IFCHR:  /* fall through */
        case S_IFIFO:
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
                break;
        default:
                BUG();
        }
}

static struct inode *__logfs_iget(struct super_block *sb, ino_t ino)
{
        struct inode *inode = iget_locked(sb, ino);
        int err;

        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = logfs_read_inode(inode);
        if (err || inode->i_nlink == 0) {
                /* inode->i_nlink == 0 can be true when called from
                 * block validator */
                /* set i_nlink to 0 to prevent caching */
                clear_nlink(inode);
                logfs_inode(inode)->li_flags |= LOGFS_IF_ZOMBIE;
                iget_failed(inode);
                if (!err)
                        err = -ENOENT;
                return ERR_PTR(err);
        }

        logfs_inode_setops(inode);
        unlock_new_inode(inode);
        return inode;
}

struct inode *logfs_iget(struct super_block *sb, ino_t ino)
{
        BUG_ON(ino == LOGFS_INO_MASTER);
        BUG_ON(ino == LOGFS_INO_SEGFILE);
        return __logfs_iget(sb, ino);
}

/*
 * is_cached is set to 1 if we hand out a cached inode, 0 otherwise.
 * this allows logfs_iput to do the right thing later
 */
struct inode *logfs_safe_iget(struct super_block *sb, ino_t ino, int *is_cached)
{
        struct logfs_super *super = logfs_super(sb);
        struct logfs_inode *li;

        if (ino == LOGFS_INO_MASTER)
                return super->s_master_inode;
        if (ino == LOGFS_INO_SEGFILE)
                return super->s_segfile_inode;

        spin_lock(&logfs_inode_lock);
        list_for_each_entry(li, &super->s_freeing_list, li_freeing_list)
                if (li->vfs_inode.i_ino == ino) {
                        li->li_refcount++;
                        spin_unlock(&logfs_inode_lock);
                        *is_cached = 1;
                        return &li->vfs_inode;
                }
        spin_unlock(&logfs_inode_lock);

        *is_cached = 0;
        return __logfs_iget(sb, ino);
}
static void logfs_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        INIT_LIST_HEAD(&inode->i_dentry);
        kmem_cache_free(logfs_inode_cache, logfs_inode(inode));
}

static void __logfs_destroy_inode(struct inode *inode)
{
        struct logfs_inode *li = logfs_inode(inode);

        BUG_ON(li->li_block);
        list_del(&li->li_freeing_list);
        call_rcu(&inode->i_rcu, logfs_i_callback);
}

static void logfs_destroy_inode(struct inode *inode)
{
        struct logfs_inode *li = logfs_inode(inode);

        BUG_ON(list_empty(&li->li_freeing_list));
        spin_lock(&logfs_inode_lock);
        li->li_refcount--;
        if (li->li_refcount == 0)
                __logfs_destroy_inode(inode);
        spin_unlock(&logfs_inode_lock);
}

void logfs_safe_iput(struct inode *inode, int is_cached)
{
        if (inode->i_ino == LOGFS_INO_MASTER)
                return;
        if (inode->i_ino == LOGFS_INO_SEGFILE)
                return;

        if (is_cached) {
                logfs_destroy_inode(inode);
                return;
        }

        iput(inode);
}

static void logfs_init_inode(struct super_block *sb, struct inode *inode)
{
        struct logfs_inode *li = logfs_inode(inode);
        int i;

        li->li_flags = 0;
        li->li_height = 0;
        li->li_used_bytes = 0;
        li->li_block = NULL;
        inode->i_uid = 0;
        inode->i_gid = 0;
        inode->i_size = 0;
        inode->i_blocks = 0;
        inode->i_ctime = CURRENT_TIME;
        inode->i_mtime = CURRENT_TIME;
        li->li_refcount = 1;
        INIT_LIST_HEAD(&li->li_freeing_list);

        for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
                li->li_data[i] = 0;

        return;
}

static struct inode *logfs_alloc_inode(struct super_block *sb)
{
        struct logfs_inode *li;

        li = kmem_cache_alloc(logfs_inode_cache, GFP_NOFS);
        if (!li)
                return NULL;
        logfs_init_inode(sb, &li->vfs_inode);
        return &li->vfs_inode;
}
/*
 * In logfs inodes are written to an inode file. The inode file, like any
 * other file, is managed with an inode. The inode file's inode, aka master
 * inode, requires special handling in several respects. First, it cannot be
 * written to the inode file, so it is stored in the journal instead.
 *
 * Secondly, this inode cannot be written back and destroyed before all other
 * inodes have been written. The ordering is important. Linux' VFS is happily
 * unaware of the ordering constraint and would ordinarily destroy the master
 * inode at umount time while other inodes are still in use and dirty. Not
 * good.
 *
 * So logfs makes sure the master inode is not written until all other inodes
 * have been destroyed. Sadly, this method has another side-effect. The VFS
 * will notice one remaining inode and print a frightening warning message.
 * Worse, it is impossible to judge whether such a warning was caused by the
 * master inode or whether other inodes have leaked as well.
 *
 * Our attempt at solving this is with logfs_new_meta_inode() below. Its
 * purpose is to create a new inode that will not trigger the warning if such
 * an inode is still in use. An ugly hack, no doubt. Suggestions for
 * improvement are welcome.
 *
 * AV: that's what ->put_super() is for...
 */
struct inode *logfs_new_meta_inode(struct super_block *sb, u64 ino)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        inode->i_mode = S_IFREG;
        inode->i_ino = ino;
        inode->i_data.a_ops = &logfs_reg_aops;
        mapping_set_gfp_mask(&inode->i_data, GFP_NOFS);
        return inode;
}
struct inode *logfs_read_meta_inode(struct super_block *sb, u64 ino)
{
        struct inode *inode;
        int err;

        inode = logfs_new_meta_inode(sb, ino);
        if (IS_ERR(inode))
                return inode;

        err = logfs_read_inode(inode);
        if (err) {
                iput(inode);
                return ERR_PTR(err);
        }
        logfs_inode_setops(inode);
        return inode;
}

static int logfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        int ret;
        long flags = WF_LOCK;

        /* Can only happen if creat() failed. Safe to skip. */
        if (logfs_inode(inode)->li_flags & LOGFS_IF_STILLBORN)
                return 0;

        ret = __logfs_write_inode(inode, NULL, flags);
        LOGFS_BUG_ON(ret, inode->i_sb);
        return ret;
}

/* called with inode->i_lock held */
static int logfs_drop_inode(struct inode *inode)
{
        struct logfs_super *super = logfs_super(inode->i_sb);
        struct logfs_inode *li = logfs_inode(inode);

        spin_lock(&logfs_inode_lock);
        list_move(&li->li_freeing_list, &super->s_freeing_list);
        spin_unlock(&logfs_inode_lock);
        return generic_drop_inode(inode);
}

static void logfs_set_ino_generation(struct super_block *sb,
                struct inode *inode)
{
        struct logfs_super *super = logfs_super(sb);
        u64 ino;

        mutex_lock(&super->s_journal_mutex);
        ino = logfs_seek_hole(super->s_master_inode, super->s_last_ino + 1);
        super->s_last_ino = ino;
        super->s_inos_till_wrap--;
        if (super->s_inos_till_wrap < 0) {
                super->s_last_ino = LOGFS_RESERVED_INOS;
                super->s_generation++;
                super->s_inos_till_wrap = INOS_PER_WRAP;
        }

        inode->i_ino = ino;
        inode->i_generation = super->s_generation;
        mutex_unlock(&super->s_journal_mutex);
}

struct inode *logfs_new_inode(struct inode *dir, int mode)
{
        struct super_block *sb = dir->i_sb;
        struct inode *inode;

        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);

        logfs_init_inode(sb, inode);

        /* inherit parent flags */
        logfs_inode(inode)->li_flags |=
                logfs_inode(dir)->li_flags & LOGFS_FL_INHERITED;

        inode->i_mode = mode;
        logfs_set_ino_generation(sb, inode);

        inode_init_owner(inode, dir, mode);
        logfs_inode_setops(inode);
        insert_inode_hash(inode);

        return inode;
}

static void logfs_init_once(void *_li)
{
        struct logfs_inode *li = _li;
        int i;

        li->li_flags = 0;
        li->li_used_bytes = 0;
        li->li_refcount = 1;
        for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
                li->li_data[i] = 0;
        inode_init_once(&li->vfs_inode);
}

static int logfs_sync_fs(struct super_block *sb, int wait)
{
        logfs_get_wblocks(sb, NULL, WF_LOCK);
        logfs_write_anchor(sb);
        logfs_put_wblocks(sb, NULL, WF_LOCK);
        return 0;
}

static void logfs_put_super(struct super_block *sb)
{
        struct logfs_super *super = logfs_super(sb);
        /* kill the meta-inodes */
        iput(super->s_master_inode);
        iput(super->s_segfile_inode);
        iput(super->s_mapping_inode);
}

const struct super_operations logfs_super_operations = {
        .alloc_inode    = logfs_alloc_inode,
        .destroy_inode  = logfs_destroy_inode,
        .evict_inode    = logfs_evict_inode,
        .drop_inode     = logfs_drop_inode,
        .put_super      = logfs_put_super,
        .write_inode    = logfs_write_inode,
        .statfs         = logfs_statfs,
        .sync_fs        = logfs_sync_fs,
};

int logfs_init_inode_cache(void)
{
        logfs_inode_cache = kmem_cache_create("logfs_inode_cache",
                        sizeof(struct logfs_inode), 0, SLAB_RECLAIM_ACCOUNT,
                        logfs_init_once);
        if (!logfs_inode_cache)
                return -ENOMEM;
        return 0;
}

void logfs_destroy_inode_cache(void)
{
        kmem_cache_destroy(logfs_inode_cache);
}