xfs: update for 3.9-rc1

Merge tag 'for-linus-v3.9-rc1' of git://oss.sgi.com/xfs/xfs

Pull xfs update from Ben Myers:
 "Primarily bugfixes and a few cleanups:

  - fix(es) for compound buffers
  - remove unused XFS_TRANS_DEBUG routines
  - fix for dquot soft timer asserts due to overflow of d_blk_softlimit
  - don't zero allocation args structure members after they are memset(0)
  - fix for regression in dir v2 code introduced in commit 20f7e9f3
  - remove obsolete simple_strto<foo>
  - fix return value when filesystem probe finds no XFS magic, a
    regression introduced in 9802182
  - remove boolean_t typedef completely
  - fix stack switch in __xfs_bmapi_allocate by moving the check for
    stack switch up into xfs_bmapi_write
  - fix build error due to incomplete boolean_t removal
  - fix oops in _xfs_buf_find by validating that the requested block is
    within the filesystem bounds
  - limit speculative preallocation near ENOSPC
  - fix an unmount hang in xfs_wait_buftarg by freeing the
    xfs_buf_log_item in xfs_buf_item_unlock
  - fix a possible use after free with AIO
  - fix xfs_swap_extents after removal of xfs_flushinval_pages, a
    regression introduced in fb59581404
  - replace hardcoded 128 with log header size
  - add memory barrier before wake_up_bit in xfs_ifunlock
  - limit speculative preallocation on sparse files
  - fix xa_lock recursion bug introduced in 90810b9e82
  - fix write verifier for symlinks"

Fixed up conflicts in fs/xfs/xfs_buf_item.c (due to the bli_format rename in
commit 0f22f9d0cd affecting the XFS_TRANS_DEBUG routines removed in commit
ec47eb6b0b).

* tag 'for-linus-v3.9-rc1' of git://oss.sgi.com/xfs/xfs: (36 commits)
  xfs: xfs_bmap_add_attrfork_local is too generic
  xfs: remove log force from xfs_buf_trylock()
  xfs: recheck buffer pinned status after push trylock failure
  xfs: limit speculative prealloc size on sparse files
  xfs: memory barrier before wake_up_bit()
  xfs: refactor space log reservation for XFS_TRANS_ATTR_SET
  xfs: make use of XFS_SB_LOG_RES() at xfs_fs_log_dummy()
  xfs: make use of XFS_SB_LOG_RES() at xfs_mount_log_sb()
  xfs: make use of XFS_SB_LOG_RES() at xfs_log_sbcount()
  xfs: introduce XFS_SB_LOG_RES() for transactions that modify sb on disk
  xfs: calculate XFS_TRANS_QM_QUOTAOFF_END space log reservation at mount time
  xfs: calculate XFS_TRANS_QM_QUOTAOFF space log reservation at mount time
  xfs: calculate XFS_TRANS_QM_DQALLOC space log reservation at mount time
  xfs: calcuate XFS_TRANS_QM_SETQLIM space log reservation at mount time
  xfs: calculate xfs_qm_write_sb_changes() space log reservation at mount time
  xfs: calculate XFS_TRANS_QM_SBCHANGE space log reservation at mount time
  xfs: make use of xfs_calc_buf_res() in xfs_trans.c
  xfs: add a helper to figure out the space log reservation per item
  xfs: Fix xfs_swap_extents() after removal of xfs_flushinval_pages()
  xfs: Fix possible use-after-free with AIO
  ...
This commit is contained in: commit 736a4c1177
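The "replace hardcoded 128 with log header size" item in the changelog refers to the xfs_buf_log_overhead() and xfs_calc_buf_res() helpers added to fs/xfs/xfs_trans.c in the diff below. What follows is a rough standalone sketch of that arithmetic, not kernel code; the two size constants are placeholders standing in for sizeof(struct xlog_op_header) and sizeof(struct xfs_buf_log_format).

#include <stdio.h>

/* Placeholder sizes; the kernel uses sizeof(struct xlog_op_header) and
 * sizeof(struct xfs_buf_log_format) here. */
#define OP_HEADER_SIZE   12u
#define BUF_FORMAT_SIZE  88u

/* Round x up to the next multiple of the power-of-two y, like the kernel's
 * round_up() with y = 128. */
static unsigned int round_up_pow2(unsigned int x, unsigned int y)
{
	return (x + y - 1) & ~(y - 1);
}

/* Per-buffer log overhead: the in-log headers rounded up to 128 bytes,
 * mirroring xfs_buf_log_overhead() from the diff below. */
static unsigned int buf_log_overhead(void)
{
	return round_up_pow2(OP_HEADER_SIZE + BUF_FORMAT_SIZE, 128);
}

/* Reservation for nbufs buffers of 'size' bytes each, mirroring
 * xfs_calc_buf_res(). */
static unsigned int calc_buf_res(unsigned int nbufs, unsigned int size)
{
	return nbufs * (size + buf_log_overhead());
}

int main(void)
{
	/* e.g. three 512-byte sector buffers logged in one transaction */
	printf("%u\n", calc_buf_res(3, 512));
	return 0;
}

With these placeholder sizes the overhead rounds up to 128 bytes, so a transaction logging three 512-byte sector buffers reserves 3 * (512 + 128) = 1920 bytes for them.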
@@ -1925,8 +1925,6 @@ xfs_alloc_fix_freelist(
targs.mp = mp;
targs.agbp = agbp;
targs.agno = args->agno;
targs.mod = targs.minleft = targs.wasdel = targs.userdata =
targs.minalignslop = 0;
targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
targs.type = XFS_ALLOCTYPE_THIS_AG;
targs.pag = pag;
@@ -300,9 +300,12 @@ xfs_attr_set_int(
if (rsvd)
args.trans->t_flags |= XFS_TRANS_RESERVE;

if ((error = xfs_trans_reserve(args.trans, args.total,
XFS_ATTRSET_LOG_RES(mp, args.total), 0,
XFS_TRANS_PERM_LOG_RES, XFS_ATTRSET_LOG_COUNT))) {
error = xfs_trans_reserve(args.trans, args.total,
XFS_ATTRSETM_LOG_RES(mp) +
XFS_ATTRSETRT_LOG_RES(mp) * args.total,
0, XFS_TRANS_PERM_LOG_RES,
XFS_ATTRSET_LOG_COUNT);
if (error) {
xfs_trans_cancel(args.trans, 0);
return(error);
}
@ -147,7 +147,10 @@ xfs_bmap_local_to_extents(
|
||||
xfs_fsblock_t *firstblock, /* first block allocated in xaction */
|
||||
xfs_extlen_t total, /* total blocks needed by transaction */
|
||||
int *logflagsp, /* inode logging flags */
|
||||
int whichfork); /* data or attr fork */
|
||||
int whichfork, /* data or attr fork */
|
||||
void (*init_fn)(struct xfs_buf *bp,
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_ifork *ifp));
|
||||
|
||||
/*
|
||||
* Search the extents list for the inode, for the extent containing bno.
|
||||
@ -357,7 +360,42 @@ xfs_bmap_add_attrfork_extents(
|
||||
}
|
||||
|
||||
/*
|
||||
* Called from xfs_bmap_add_attrfork to handle local format files.
|
||||
* Block initialisation functions for local to extent format conversion.
|
||||
* As these get more complex, they will be moved to the relevant files,
|
||||
* but for now they are too simple to worry about.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_bmap_local_to_extents_init_fn(
|
||||
struct xfs_buf *bp,
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_ifork *ifp)
|
||||
{
|
||||
bp->b_ops = &xfs_bmbt_buf_ops;
|
||||
memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_symlink_local_to_remote(
|
||||
struct xfs_buf *bp,
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_ifork *ifp)
|
||||
{
|
||||
/* remote symlink blocks are not verifiable until CRCs come along */
|
||||
bp->b_ops = NULL;
|
||||
memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called from xfs_bmap_add_attrfork to handle local format files. Each
|
||||
* different data fork content type needs a different callout to do the
|
||||
* conversion. Some are basic and only require special block initialisation
|
||||
* callouts for the data formating, others (directories) are so specialised they
|
||||
* handle everything themselves.
|
||||
*
|
||||
* XXX (dgc): investigate whether directory conversion can use the generic
|
||||
* formatting callout. It should be possible - it's just a very complex
|
||||
* formatter. it would also require passing the transaction through to the init
|
||||
* function.
|
||||
*/
|
||||
STATIC int /* error */
|
||||
xfs_bmap_add_attrfork_local(
|
||||
@ -368,25 +406,29 @@ xfs_bmap_add_attrfork_local(
|
||||
int *flags) /* inode logging flags */
|
||||
{
|
||||
xfs_da_args_t dargs; /* args for dir/attr code */
|
||||
int error; /* error return value */
|
||||
xfs_mount_t *mp; /* mount structure pointer */
|
||||
|
||||
if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
|
||||
return 0;
|
||||
|
||||
if (S_ISDIR(ip->i_d.di_mode)) {
|
||||
mp = ip->i_mount;
|
||||
memset(&dargs, 0, sizeof(dargs));
|
||||
dargs.dp = ip;
|
||||
dargs.firstblock = firstblock;
|
||||
dargs.flist = flist;
|
||||
dargs.total = mp->m_dirblkfsbs;
|
||||
dargs.total = ip->i_mount->m_dirblkfsbs;
|
||||
dargs.whichfork = XFS_DATA_FORK;
|
||||
dargs.trans = tp;
|
||||
error = xfs_dir2_sf_to_block(&dargs);
|
||||
} else
|
||||
error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
|
||||
XFS_DATA_FORK);
|
||||
return error;
|
||||
return xfs_dir2_sf_to_block(&dargs);
|
||||
}
|
||||
|
||||
if (S_ISLNK(ip->i_d.di_mode))
|
||||
return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
|
||||
flags, XFS_DATA_FORK,
|
||||
xfs_symlink_local_to_remote);
|
||||
|
||||
return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
|
||||
XFS_DATA_FORK,
|
||||
xfs_bmap_local_to_extents_init_fn);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3099,8 +3141,6 @@ xfs_bmap_extents_to_btree(
|
||||
args.fsbno = *firstblock;
|
||||
}
|
||||
args.minlen = args.maxlen = args.prod = 1;
|
||||
args.total = args.minleft = args.alignment = args.mod = args.isfl =
|
||||
args.minalignslop = 0;
|
||||
args.wasdel = wasdel;
|
||||
*logflagsp = 0;
|
||||
if ((error = xfs_alloc_vextent(&args))) {
|
||||
@ -3221,7 +3261,10 @@ xfs_bmap_local_to_extents(
|
||||
xfs_fsblock_t *firstblock, /* first block allocated in xaction */
|
||||
xfs_extlen_t total, /* total blocks needed by transaction */
|
||||
int *logflagsp, /* inode logging flags */
|
||||
int whichfork) /* data or attr fork */
|
||||
int whichfork,
|
||||
void (*init_fn)(struct xfs_buf *bp,
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_ifork *ifp))
|
||||
{
|
||||
int error; /* error return value */
|
||||
int flags; /* logging flags returned */
|
||||
@ -3241,12 +3284,12 @@ xfs_bmap_local_to_extents(
|
||||
xfs_buf_t *bp; /* buffer for extent block */
|
||||
xfs_bmbt_rec_host_t *ep;/* extent record pointer */
|
||||
|
||||
ASSERT((ifp->if_flags &
|
||||
(XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
|
||||
memset(&args, 0, sizeof(args));
|
||||
args.tp = tp;
|
||||
args.mp = ip->i_mount;
|
||||
args.firstblock = *firstblock;
|
||||
ASSERT((ifp->if_flags &
|
||||
(XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
|
||||
/*
|
||||
* Allocate a block. We know we need only one, since the
|
||||
* file currently fits in an inode.
|
||||
@ -3259,20 +3302,21 @@ xfs_bmap_local_to_extents(
|
||||
args.type = XFS_ALLOCTYPE_NEAR_BNO;
|
||||
}
|
||||
args.total = total;
|
||||
args.mod = args.minleft = args.alignment = args.wasdel =
|
||||
args.isfl = args.minalignslop = 0;
|
||||
args.minlen = args.maxlen = args.prod = 1;
|
||||
if ((error = xfs_alloc_vextent(&args)))
|
||||
error = xfs_alloc_vextent(&args);
|
||||
if (error)
|
||||
goto done;
|
||||
/*
|
||||
* Can't fail, the space was reserved.
|
||||
*/
|
||||
|
||||
/* Can't fail, the space was reserved. */
|
||||
ASSERT(args.fsbno != NULLFSBLOCK);
|
||||
ASSERT(args.len == 1);
|
||||
*firstblock = args.fsbno;
|
||||
bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
|
||||
bp->b_ops = &xfs_bmbt_buf_ops;
|
||||
memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
|
||||
|
||||
/* initialise the block and copy the data */
|
||||
init_fn(bp, ip, ifp);
|
||||
|
||||
/* account for the change in fork size and log everything */
|
||||
xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
|
||||
xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
|
||||
xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
|
||||
@ -4919,8 +4963,32 @@ xfs_bmapi_write(
|
||||
XFS_STATS_INC(xs_blk_mapw);
|
||||
|
||||
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
|
||||
/*
|
||||
* XXX (dgc): This assumes we are only called for inodes that
|
||||
* contain content neutral data in local format. Anything that
|
||||
* contains caller-specific data in local format that needs
|
||||
* transformation to move to a block format needs to do the
|
||||
* conversion to extent format itself.
|
||||
*
|
||||
* Directory data forks and attribute forks handle this
|
||||
* themselves, but with the addition of metadata verifiers every
|
||||
* data fork in local format now contains caller specific data
|
||||
* and as such conversion through this function is likely to be
|
||||
* broken.
|
||||
*
|
||||
* The only likely user of this branch is for remote symlinks,
|
||||
* but we cannot overwrite the data fork contents of the symlink
|
||||
* (EEXIST occurs higher up the stack) and so it will never go
|
||||
* from local format to extent format here. Hence I don't think
|
||||
* this branch is ever executed intentionally and we should
|
||||
* consider removing it and asserting that xfs_bmapi_write()
|
||||
* cannot be called directly on local format forks. i.e. callers
|
||||
* are completely responsible for local to extent format
|
||||
* conversion, not xfs_bmapi_write().
|
||||
*/
|
||||
error = xfs_bmap_local_to_extents(tp, ip, firstblock, total,
|
||||
&bma.logflags, whichfork);
|
||||
&bma.logflags, whichfork,
|
||||
xfs_bmap_local_to_extents_init_fn);
|
||||
if (error)
|
||||
goto error0;
|
||||
}
|
||||
|
@@ -951,8 +951,6 @@ xfs_buf_trylock(
locked = down_trylock(&bp->b_sema) == 0;
if (locked)
XB_SET_OWNER(bp);
else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
xfs_log_force(bp->b_target->bt_mount, 0);

trace_xfs_buf_trylock(bp, _RET_IP_);
return locked;
@ -37,109 +37,6 @@ static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
|
||||
return container_of(lip, struct xfs_buf_log_item, bli_item);
|
||||
}
|
||||
|
||||
|
||||
#ifdef XFS_TRANS_DEBUG
|
||||
/*
|
||||
* This function uses an alternate strategy for tracking the bytes
|
||||
* that the user requests to be logged. This can then be used
|
||||
* in conjunction with the bli_orig array in the buf log item to
|
||||
* catch bugs in our callers' code.
|
||||
*
|
||||
* We also double check the bits set in xfs_buf_item_log using a
|
||||
* simple algorithm to check that every byte is accounted for.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_buf_item_log_debug(
|
||||
xfs_buf_log_item_t *bip,
|
||||
uint first,
|
||||
uint last)
|
||||
{
|
||||
uint x;
|
||||
uint byte;
|
||||
uint nbytes;
|
||||
uint chunk_num;
|
||||
uint word_num;
|
||||
uint bit_num;
|
||||
uint bit_set;
|
||||
uint *wordp;
|
||||
|
||||
ASSERT(bip->bli_logged != NULL);
|
||||
byte = first;
|
||||
nbytes = last - first + 1;
|
||||
bfset(bip->bli_logged, first, nbytes);
|
||||
for (x = 0; x < nbytes; x++) {
|
||||
chunk_num = byte >> XFS_BLF_SHIFT;
|
||||
word_num = chunk_num >> BIT_TO_WORD_SHIFT;
|
||||
bit_num = chunk_num & (NBWORD - 1);
|
||||
wordp = &(bip->__bli_format.blf_data_map[word_num]);
|
||||
bit_set = *wordp & (1 << bit_num);
|
||||
ASSERT(bit_set);
|
||||
byte++;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is called when we flush something into a buffer without
|
||||
* logging it. This happens for things like inodes which are logged
|
||||
* separately from the buffer.
|
||||
*/
|
||||
void
|
||||
xfs_buf_item_flush_log_debug(
|
||||
xfs_buf_t *bp,
|
||||
uint first,
|
||||
uint last)
|
||||
{
|
||||
xfs_buf_log_item_t *bip = bp->b_fspriv;
|
||||
uint nbytes;
|
||||
|
||||
if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF))
|
||||
return;
|
||||
|
||||
ASSERT(bip->bli_logged != NULL);
|
||||
nbytes = last - first + 1;
|
||||
bfset(bip->bli_logged, first, nbytes);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is called to verify that our callers have logged
|
||||
* all the bytes that they changed.
|
||||
*
|
||||
* It does this by comparing the original copy of the buffer stored in
|
||||
* the buf log item's bli_orig array to the current copy of the buffer
|
||||
* and ensuring that all bytes which mismatch are set in the bli_logged
|
||||
* array of the buf log item.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_buf_item_log_check(
|
||||
xfs_buf_log_item_t *bip)
|
||||
{
|
||||
char *orig;
|
||||
char *buffer;
|
||||
int x;
|
||||
xfs_buf_t *bp;
|
||||
|
||||
ASSERT(bip->bli_orig != NULL);
|
||||
ASSERT(bip->bli_logged != NULL);
|
||||
|
||||
bp = bip->bli_buf;
|
||||
ASSERT(bp->b_length > 0);
|
||||
ASSERT(bp->b_addr != NULL);
|
||||
orig = bip->bli_orig;
|
||||
buffer = bp->b_addr;
|
||||
for (x = 0; x < BBTOB(bp->b_length); x++) {
|
||||
if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
|
||||
xfs_emerg(bp->b_mount,
|
||||
"%s: bip %x buffer %x orig %x index %d",
|
||||
__func__, bip, bp, orig, x);
|
||||
ASSERT(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
#else
|
||||
#define xfs_buf_item_log_debug(x,y,z)
|
||||
#define xfs_buf_item_log_check(x)
|
||||
#endif
|
||||
|
||||
STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
|
||||
|
||||
/*
|
||||
@ -429,7 +326,6 @@ xfs_buf_item_format(
|
||||
* Check to make sure everything is consistent.
|
||||
*/
|
||||
trace_xfs_buf_item_format(bip);
|
||||
xfs_buf_item_log_check(bip);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -573,8 +469,18 @@ xfs_buf_item_push(
|
||||
|
||||
if (xfs_buf_ispinned(bp))
|
||||
return XFS_ITEM_PINNED;
|
||||
if (!xfs_buf_trylock(bp))
|
||||
if (!xfs_buf_trylock(bp)) {
|
||||
/*
|
||||
* If we have just raced with a buffer being pinned and it has
|
||||
* been marked stale, we could end up stalling until someone else
|
||||
* issues a log force to unpin the stale buffer. Check for the
|
||||
* race condition here so xfsaild recognizes the buffer is pinned
|
||||
* and queues a log force to move it along.
|
||||
*/
|
||||
if (xfs_buf_ispinned(bp))
|
||||
return XFS_ITEM_PINNED;
|
||||
return XFS_ITEM_LOCKED;
|
||||
}
|
||||
|
||||
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
|
||||
|
||||
@ -923,8 +829,6 @@ xfs_buf_item_log_segment(
|
||||
mask = (1 << end_bit) - 1;
|
||||
*wordp |= mask;
|
||||
}
|
||||
|
||||
xfs_buf_item_log_debug(bip, first, last);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -98,10 +98,6 @@ typedef struct xfs_buf_log_item {
|
||||
unsigned int bli_flags; /* misc flags */
|
||||
unsigned int bli_recur; /* lock recursion count */
|
||||
atomic_t bli_refcount; /* cnt of tp refs */
|
||||
#ifdef XFS_TRANS_DEBUG
|
||||
char *bli_orig; /* original buffer copy */
|
||||
char *bli_logged; /* bytes logged (bitmap) */
|
||||
#endif
|
||||
int bli_format_count; /* count of headers */
|
||||
struct xfs_buf_log_format *bli_formats; /* array of in-log header ptrs */
|
||||
struct xfs_buf_log_format __bli_format; /* embedded in-log header */
|
||||
@ -117,16 +113,6 @@ void xfs_buf_attach_iodone(struct xfs_buf *,
|
||||
void xfs_buf_iodone_callbacks(struct xfs_buf *);
|
||||
void xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
|
||||
|
||||
#ifdef XFS_TRANS_DEBUG
|
||||
void
|
||||
xfs_buf_item_flush_log_debug(
|
||||
struct xfs_buf *bp,
|
||||
uint first,
|
||||
uint last);
|
||||
#else
|
||||
#define xfs_buf_item_flush_log_debug(bp, first, last)
|
||||
#endif
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* __XFS_BUF_ITEM_H__ */
|
||||
|
@ -612,15 +612,9 @@ xfs_qm_dqread(
|
||||
if (flags & XFS_QMOPT_DQALLOC) {
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
|
||||
error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
|
||||
XFS_WRITE_LOG_RES(mp) +
|
||||
/*
|
||||
* Round the chunklen up to the next multiple
|
||||
* of 128 (buf log item chunk size)).
|
||||
*/
|
||||
BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 + 128,
|
||||
0,
|
||||
XFS_TRANS_PERM_LOG_RES,
|
||||
XFS_WRITE_LOG_COUNT);
|
||||
XFS_QM_DQALLOC_LOG_RES(mp), 0,
|
||||
XFS_TRANS_PERM_LOG_RES,
|
||||
XFS_WRITE_LOG_COUNT);
|
||||
if (error)
|
||||
goto error1;
|
||||
cancelflags = XFS_TRANS_RELEASE_LOG_RES;
|
||||
|
@@ -709,8 +709,8 @@ xfs_fs_log_dummy(
int error;

tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
XFS_DEFAULT_LOG_COUNT);
error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
XFS_DEFAULT_LOG_COUNT);
if (error) {
xfs_trans_cancel(tp, 0);
return error;
@ -279,8 +279,6 @@ xfs_ialloc_ag_alloc(
|
||||
(args.agbno < be32_to_cpu(agi->agi_length)))) {
|
||||
args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
|
||||
args.type = XFS_ALLOCTYPE_THIS_BNO;
|
||||
args.mod = args.total = args.wasdel = args.isfl =
|
||||
args.userdata = args.minalignslop = 0;
|
||||
args.prod = 1;
|
||||
|
||||
/*
|
||||
@ -333,8 +331,6 @@ xfs_ialloc_ag_alloc(
|
||||
* Allocate a fixed-size extent of inodes.
|
||||
*/
|
||||
args.type = XFS_ALLOCTYPE_NEAR_BNO;
|
||||
args.mod = args.total = args.wasdel = args.isfl =
|
||||
args.userdata = args.minalignslop = 0;
|
||||
args.prod = 1;
|
||||
/*
|
||||
* Allow space for the inode btree to split.
|
||||
|
@ -2379,9 +2379,6 @@ xfs_iflush_fork(
|
||||
char *cp;
|
||||
xfs_ifork_t *ifp;
|
||||
xfs_mount_t *mp;
|
||||
#ifdef XFS_TRANS_DEBUG
|
||||
int first;
|
||||
#endif
|
||||
static const short brootflag[2] =
|
||||
{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
|
||||
static const short dataflag[2] =
|
||||
@ -2724,9 +2721,6 @@ xfs_iflush_int(
|
||||
xfs_inode_log_item_t *iip;
|
||||
xfs_dinode_t *dip;
|
||||
xfs_mount_t *mp;
|
||||
#ifdef XFS_TRANS_DEBUG
|
||||
int first;
|
||||
#endif
|
||||
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
|
||||
ASSERT(xfs_isiflocked(ip));
|
||||
|
@@ -419,6 +419,7 @@ static inline void xfs_iflock(struct xfs_inode *ip)
static inline void xfs_ifunlock(struct xfs_inode *ip)
{
xfs_iflags_clear(ip, XFS_IFLOCK);
smp_mb();
wake_up_bit(&ip->i_flags, __XFS_IFLOCK_BIT);
}

@ -269,17 +269,6 @@ xfs_inode_item_format(
|
||||
} else {
|
||||
ASSERT(!(iip->ili_fields &
|
||||
XFS_ILOG_DBROOT));
|
||||
#ifdef XFS_TRANS_DEBUG
|
||||
if (iip->ili_root_size > 0) {
|
||||
ASSERT(iip->ili_root_size ==
|
||||
ip->i_df.if_broot_bytes);
|
||||
ASSERT(memcmp(iip->ili_orig_root,
|
||||
ip->i_df.if_broot,
|
||||
iip->ili_root_size) == 0);
|
||||
} else {
|
||||
ASSERT(ip->i_df.if_broot_bytes == 0);
|
||||
}
|
||||
#endif
|
||||
iip->ili_fields &= ~XFS_ILOG_DBROOT;
|
||||
}
|
||||
break;
|
||||
@ -678,11 +667,6 @@ void
|
||||
xfs_inode_item_destroy(
|
||||
xfs_inode_t *ip)
|
||||
{
|
||||
#ifdef XFS_TRANS_DEBUG
|
||||
if (ip->i_itemp->ili_root_size != 0) {
|
||||
kmem_free(ip->i_itemp->ili_orig_root);
|
||||
}
|
||||
#endif
|
||||
kmem_zone_free(xfs_ili_zone, ip->i_itemp);
|
||||
}
|
||||
|
||||
|
@ -148,10 +148,6 @@ typedef struct xfs_inode_log_item {
|
||||
data exts */
|
||||
struct xfs_bmbt_rec *ili_aextents_buf; /* array of logged
|
||||
attr exts */
|
||||
#ifdef XFS_TRANS_DEBUG
|
||||
int ili_root_size;
|
||||
char *ili_orig_root;
|
||||
#endif
|
||||
xfs_inode_log_format_t ili_format; /* logged structure */
|
||||
} xfs_inode_log_item_t;
|
||||
|
||||
|
@ -310,6 +310,62 @@ xfs_iomap_eof_want_preallocate(
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Determine the initial size of the preallocation. We are beyond the current
|
||||
* EOF here, but we need to take into account whether this is a sparse write or
|
||||
* an extending write when determining the preallocation size. Hence we need to
|
||||
* look up the extent that ends at the current write offset and use the result
|
||||
* to determine the preallocation size.
|
||||
*
|
||||
* If the extent is a hole, then preallocation is essentially disabled.
|
||||
* Otherwise we take the size of the preceeding data extent as the basis for the
|
||||
* preallocation size. If the size of the extent is greater than half the
|
||||
* maximum extent length, then use the current offset as the basis. This ensures
|
||||
* that for large files the preallocation size always extends to MAXEXTLEN
|
||||
* rather than falling short due to things like stripe unit/width alignment of
|
||||
* real extents.
|
||||
*/
|
||||
STATIC int
|
||||
xfs_iomap_eof_prealloc_initial_size(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_inode *ip,
|
||||
xfs_off_t offset,
|
||||
xfs_bmbt_irec_t *imap,
|
||||
int nimaps)
|
||||
{
|
||||
xfs_fileoff_t start_fsb;
|
||||
int imaps = 1;
|
||||
int error;
|
||||
|
||||
ASSERT(nimaps >= imaps);
|
||||
|
||||
/* if we are using a specific prealloc size, return now */
|
||||
if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* As we write multiple pages, the offset will always align to the
|
||||
* start of a page and hence point to a hole at EOF. i.e. if the size is
|
||||
* 4096 bytes, we only have one block at FSB 0, but XFS_B_TO_FSB(4096)
|
||||
* will return FSB 1. Hence if there are blocks in the file, we want to
|
||||
* point to the block prior to the EOF block and not the hole that maps
|
||||
* directly at @offset.
|
||||
*/
|
||||
start_fsb = XFS_B_TO_FSB(mp, offset);
|
||||
if (start_fsb)
|
||||
start_fsb--;
|
||||
error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
|
||||
if (error)
|
||||
return 0;
|
||||
|
||||
ASSERT(imaps == 1);
|
||||
if (imap[0].br_startblock == HOLESTARTBLOCK)
|
||||
return 0;
|
||||
if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
|
||||
return imap[0].br_blockcount;
|
||||
return XFS_B_TO_FSB(mp, offset);
|
||||
}
|
||||
|
||||
/*
|
||||
* If we don't have a user specified preallocation size, dynamically increase
|
||||
* the preallocation size as the size of the file grows. Cap the maximum size
|
||||
@ -319,20 +375,19 @@ xfs_iomap_eof_want_preallocate(
|
||||
STATIC xfs_fsblock_t
|
||||
xfs_iomap_prealloc_size(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_inode *ip,
|
||||
xfs_off_t offset,
|
||||
struct xfs_bmbt_irec *imap,
|
||||
int nimaps)
|
||||
{
|
||||
xfs_fsblock_t alloc_blocks = 0;
|
||||
|
||||
if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
|
||||
alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
|
||||
imap, nimaps);
|
||||
if (alloc_blocks > 0) {
|
||||
int shift = 0;
|
||||
int64_t freesp;
|
||||
|
||||
/*
|
||||
* rounddown_pow_of_two() returns an undefined result
|
||||
* if we pass in alloc_blocks = 0. Hence the "+ 1" to
|
||||
* ensure we always pass in a non-zero value.
|
||||
*/
|
||||
alloc_blocks = XFS_B_TO_FSB(mp, XFS_ISIZE(ip)) + 1;
|
||||
alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
|
||||
rounddown_pow_of_two(alloc_blocks));
|
||||
|
||||
@ -399,7 +454,6 @@ xfs_iomap_write_delay(
|
||||
extsz = xfs_get_extsz_hint(ip);
|
||||
offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
||||
|
||||
|
||||
error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
|
||||
imap, XFS_WRITE_IMAPS, &prealloc);
|
||||
if (error)
|
||||
@ -407,7 +461,10 @@ xfs_iomap_write_delay(
|
||||
|
||||
retry:
|
||||
if (prealloc) {
|
||||
xfs_fsblock_t alloc_blocks = xfs_iomap_prealloc_size(mp, ip);
|
||||
xfs_fsblock_t alloc_blocks;
|
||||
|
||||
alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
|
||||
XFS_WRITE_IMAPS);
|
||||
|
||||
aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
|
||||
ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
|
||||
|
@ -120,7 +120,7 @@ xlog_verify_iclog(
|
||||
struct xlog *log,
|
||||
struct xlog_in_core *iclog,
|
||||
int count,
|
||||
boolean_t syncing);
|
||||
bool syncing);
|
||||
STATIC void
|
||||
xlog_verify_tail_lsn(
|
||||
struct xlog *log,
|
||||
@ -1737,7 +1737,7 @@ xlog_sync(
|
||||
ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
|
||||
ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
|
||||
|
||||
xlog_verify_iclog(log, iclog, count, B_TRUE);
|
||||
xlog_verify_iclog(log, iclog, count, true);
|
||||
|
||||
/* account for log which doesn't start at block #0 */
|
||||
XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
|
||||
@ -3611,7 +3611,7 @@ xlog_verify_iclog(
|
||||
struct xlog *log,
|
||||
struct xlog_in_core *iclog,
|
||||
int count,
|
||||
boolean_t syncing)
|
||||
bool syncing)
|
||||
{
|
||||
xlog_op_header_t *ophead;
|
||||
xlog_in_core_t *icptr;
|
||||
@ -3659,7 +3659,7 @@ xlog_verify_iclog(
|
||||
/* clientid is only 1 byte */
|
||||
field_offset = (__psint_t)
|
||||
((xfs_caddr_t)&(ophead->oh_clientid) - base_ptr);
|
||||
if (syncing == B_FALSE || (field_offset & 0x1ff)) {
|
||||
if (!syncing || (field_offset & 0x1ff)) {
|
||||
clientid = ophead->oh_clientid;
|
||||
} else {
|
||||
idx = BTOBBT((xfs_caddr_t)&(ophead->oh_clientid) - iclog->ic_datap);
|
||||
@ -3682,7 +3682,7 @@ xlog_verify_iclog(
|
||||
/* check length */
|
||||
field_offset = (__psint_t)
|
||||
((xfs_caddr_t)&(ophead->oh_len) - base_ptr);
|
||||
if (syncing == B_FALSE || (field_offset & 0x1ff)) {
|
||||
if (!syncing || (field_offset & 0x1ff)) {
|
||||
op_len = be32_to_cpu(ophead->oh_len);
|
||||
} else {
|
||||
idx = BTOBBT((__psint_t)&ophead->oh_len -
|
||||
|
@ -1109,8 +1109,8 @@ xfs_mount_reset_sbqflags(
|
||||
return 0;
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
|
||||
error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
|
||||
XFS_DEFAULT_LOG_COUNT);
|
||||
error = xfs_trans_reserve(tp, 0, XFS_QM_SBCHANGE_LOG_RES(mp),
|
||||
0, 0, XFS_DEFAULT_LOG_COUNT);
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp, 0);
|
||||
xfs_alert(mp, "%s: Superblock update failed!", __func__);
|
||||
@ -1583,8 +1583,8 @@ xfs_log_sbcount(xfs_mount_t *mp)
|
||||
return 0;
|
||||
|
||||
tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
|
||||
error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
|
||||
XFS_DEFAULT_LOG_COUNT);
|
||||
error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
|
||||
XFS_DEFAULT_LOG_COUNT);
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp, 0);
|
||||
return error;
|
||||
@ -1945,8 +1945,8 @@ xfs_mount_log_sb(
|
||||
XFS_SB_VERSIONNUM));
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
|
||||
error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
|
||||
XFS_DEFAULT_LOG_COUNT);
|
||||
error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
|
||||
XFS_DEFAULT_LOG_COUNT);
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp, 0);
|
||||
return error;
|
||||
|
@@ -34,12 +34,19 @@ typedef struct xfs_trans_reservations {
uint tr_addafork; /* cvt inode to attributed trans */
uint tr_writeid; /* write setuid/setgid file */
uint tr_attrinval; /* attr fork buffer invalidation */
uint tr_attrset; /* set/create an attribute */
uint tr_attrsetm; /* set/create an attribute at mount time */
uint tr_attrsetrt; /* set/create an attribute at runtime */
uint tr_attrrm; /* remove an attribute */
uint tr_clearagi; /* clear bad agi unlinked ino bucket */
uint tr_growrtalloc; /* grow realtime allocations */
uint tr_growrtzero; /* grow realtime zeroing */
uint tr_growrtfree; /* grow realtime freeing */
uint tr_qm_sbchange; /* change quota flags */
uint tr_qm_setqlim; /* adjust quota limits */
uint tr_qm_dqalloc; /* allocate quota on disk */
uint tr_qm_quotaoff; /* turn quota off */
uint tr_qm_equotaoff;/* end of turn quota off */
uint tr_sb; /* modify superblock */
} xfs_trans_reservations_t;

#ifndef __KERNEL__
@ -1584,10 +1584,9 @@ xfs_qm_write_sb_changes(
|
||||
int error;
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
|
||||
if ((error = xfs_trans_reserve(tp, 0,
|
||||
mp->m_sb.sb_sectsize + 128, 0,
|
||||
0,
|
||||
XFS_DEFAULT_LOG_COUNT))) {
|
||||
error = xfs_trans_reserve(tp, 0, XFS_QM_SBCHANGE_LOG_RES(mp),
|
||||
0, 0, XFS_DEFAULT_LOG_COUNT);
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp, 0);
|
||||
return error;
|
||||
}
|
||||
|
@ -146,7 +146,7 @@ xfs_qm_newmount(
|
||||
* inode goes inactive and wants to free blocks,
|
||||
* or via xfs_log_mount_finish.
|
||||
*/
|
||||
*needquotamount = B_TRUE;
|
||||
*needquotamount = true;
|
||||
*quotaflags = mp->m_qflags;
|
||||
mp->m_qflags = 0;
|
||||
}
|
||||
|
@ -408,10 +408,10 @@ xfs_qm_scall_getqstat(
|
||||
{
|
||||
struct xfs_quotainfo *q = mp->m_quotainfo;
|
||||
struct xfs_inode *uip, *gip;
|
||||
boolean_t tempuqip, tempgqip;
|
||||
bool tempuqip, tempgqip;
|
||||
|
||||
uip = gip = NULL;
|
||||
tempuqip = tempgqip = B_FALSE;
|
||||
tempuqip = tempgqip = false;
|
||||
memset(out, 0, sizeof(fs_quota_stat_t));
|
||||
|
||||
out->qs_version = FS_QSTAT_VERSION;
|
||||
@ -434,12 +434,12 @@ xfs_qm_scall_getqstat(
|
||||
if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
|
||||
if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
|
||||
0, 0, &uip) == 0)
|
||||
tempuqip = B_TRUE;
|
||||
tempuqip = true;
|
||||
}
|
||||
if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
|
||||
if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
|
||||
0, 0, &gip) == 0)
|
||||
tempgqip = B_TRUE;
|
||||
tempgqip = true;
|
||||
}
|
||||
if (uip) {
|
||||
out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
|
||||
@ -490,8 +490,9 @@ xfs_qm_scall_setqlim(
|
||||
return 0;
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
|
||||
if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
|
||||
0, 0, XFS_DEFAULT_LOG_COUNT))) {
|
||||
error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
|
||||
0, 0, XFS_DEFAULT_LOG_COUNT);
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp, 0);
|
||||
return (error);
|
||||
}
|
||||
@ -638,8 +639,9 @@ xfs_qm_log_quotaoff_end(
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);
|
||||
|
||||
if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2,
|
||||
0, 0, XFS_DEFAULT_LOG_COUNT))) {
|
||||
error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_END_LOG_RES(mp),
|
||||
0, 0, XFS_DEFAULT_LOG_COUNT);
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp, 0);
|
||||
return (error);
|
||||
}
|
||||
@ -671,14 +673,10 @@ xfs_qm_log_quotaoff(
|
||||
uint oldsbqflag=0;
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
|
||||
if ((error = xfs_trans_reserve(tp, 0,
|
||||
sizeof(xfs_qoff_logitem_t) * 2 +
|
||||
mp->m_sb.sb_sectsize + 128,
|
||||
0,
|
||||
0,
|
||||
XFS_DEFAULT_LOG_COUNT))) {
|
||||
error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_LOG_RES(mp),
|
||||
0, 0, XFS_DEFAULT_LOG_COUNT);
|
||||
if (error)
|
||||
goto error0;
|
||||
}
|
||||
|
||||
qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
|
||||
xfs_trans_log_quotaoff_item(tp, qoffi);
|
||||
|
@ -139,9 +139,9 @@ static const match_table_t tokens = {
|
||||
|
||||
|
||||
STATIC unsigned long
|
||||
suffix_strtoul(char *s, char **endp, unsigned int base)
|
||||
suffix_kstrtoint(char *s, unsigned int base, int *res)
|
||||
{
|
||||
int last, shift_left_factor = 0;
|
||||
int last, shift_left_factor = 0, _res;
|
||||
char *value = s;
|
||||
|
||||
last = strlen(value) - 1;
|
||||
@ -158,7 +158,10 @@ suffix_strtoul(char *s, char **endp, unsigned int base)
|
||||
value[last] = '\0';
|
||||
}
|
||||
|
||||
return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
|
||||
if (kstrtoint(s, base, &_res))
|
||||
return -EINVAL;
|
||||
*res = _res << shift_left_factor;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -174,7 +177,7 @@ xfs_parseargs(
|
||||
char *options)
|
||||
{
|
||||
struct super_block *sb = mp->m_super;
|
||||
char *this_char, *value, *eov;
|
||||
char *this_char, *value;
|
||||
int dsunit = 0;
|
||||
int dswidth = 0;
|
||||
int iosize = 0;
|
||||
@ -230,14 +233,16 @@ xfs_parseargs(
|
||||
this_char);
|
||||
return EINVAL;
|
||||
}
|
||||
mp->m_logbufs = simple_strtoul(value, &eov, 10);
|
||||
if (kstrtoint(value, 10, &mp->m_logbufs))
|
||||
return EINVAL;
|
||||
} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
|
||||
if (!value || !*value) {
|
||||
xfs_warn(mp, "%s option requires an argument",
|
||||
this_char);
|
||||
return EINVAL;
|
||||
}
|
||||
mp->m_logbsize = suffix_strtoul(value, &eov, 10);
|
||||
if (suffix_kstrtoint(value, 10, &mp->m_logbsize))
|
||||
return EINVAL;
|
||||
} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
|
||||
if (!value || !*value) {
|
||||
xfs_warn(mp, "%s option requires an argument",
|
||||
@ -266,7 +271,8 @@ xfs_parseargs(
|
||||
this_char);
|
||||
return EINVAL;
|
||||
}
|
||||
iosize = simple_strtoul(value, &eov, 10);
|
||||
if (kstrtoint(value, 10, &iosize))
|
||||
return EINVAL;
|
||||
iosizelog = ffs(iosize) - 1;
|
||||
} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
|
||||
if (!value || !*value) {
|
||||
@ -274,7 +280,8 @@ xfs_parseargs(
|
||||
this_char);
|
||||
return EINVAL;
|
||||
}
|
||||
iosize = suffix_strtoul(value, &eov, 10);
|
||||
if (suffix_kstrtoint(value, 10, &iosize))
|
||||
return EINVAL;
|
||||
iosizelog = ffs(iosize) - 1;
|
||||
} else if (!strcmp(this_char, MNTOPT_GRPID) ||
|
||||
!strcmp(this_char, MNTOPT_BSDGROUPS)) {
|
||||
@ -296,14 +303,16 @@ xfs_parseargs(
|
||||
this_char);
|
||||
return EINVAL;
|
||||
}
|
||||
dsunit = simple_strtoul(value, &eov, 10);
|
||||
if (kstrtoint(value, 10, &dsunit))
|
||||
return EINVAL;
|
||||
} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
|
||||
if (!value || !*value) {
|
||||
xfs_warn(mp, "%s option requires an argument",
|
||||
this_char);
|
||||
return EINVAL;
|
||||
}
|
||||
dswidth = simple_strtoul(value, &eov, 10);
|
||||
if (kstrtoint(value, 10, &dswidth))
|
||||
return EINVAL;
|
||||
} else if (!strcmp(this_char, MNTOPT_32BITINODE)) {
|
||||
mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
|
||||
} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
|
||||
|
@@ -37,14 +37,45 @@
#include "xfs_extent_busy.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trans_priv.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"

kmem_zone_t *xfs_trans_zone;
kmem_zone_t *xfs_log_item_desc_zone;

/*
* A buffer has a format structure overhead in the log in addition
* to the data, so we need to take this into account when reserving
* space in a transaction for a buffer. Round the space required up
* to a multiple of 128 bytes so that we don't change the historical
* reservation that has been used for this overhead.
*/
STATIC uint
xfs_buf_log_overhead(void)
{
return round_up(sizeof(struct xlog_op_header) +
sizeof(struct xfs_buf_log_format), 128);
}

/*
* Calculate out transaction log reservation per item in bytes.
*
* The nbufs argument is used to indicate the number of items that
* will be changed in a transaction. size is used to tell how many
* bytes should be reserved per item.
*/
STATIC uint
xfs_calc_buf_res(
uint nbufs,
uint size)
{
return nbufs * (size + xfs_buf_log_overhead());
}

/*
* Various log reservation values.
@ -85,18 +116,15 @@ xfs_calc_write_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
MAX((mp->m_sb.sb_inodesize +
|
||||
XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) +
|
||||
2 * mp->m_sb.sb_sectsize +
|
||||
mp->m_sb.sb_sectsize +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 2) +
|
||||
128 * (4 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) +
|
||||
XFS_ALLOCFREE_LOG_COUNT(mp, 2))),
|
||||
(2 * mp->m_sb.sb_sectsize +
|
||||
2 * mp->m_sb.sb_sectsize +
|
||||
mp->m_sb.sb_sectsize +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 2) +
|
||||
128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))));
|
||||
MAX((xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
|
||||
XFS_FSB_TO_B(mp, 1)) +
|
||||
xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
|
||||
XFS_FSB_TO_B(mp, 1))),
|
||||
(xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
|
||||
XFS_FSB_TO_B(mp, 1))));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -117,18 +145,17 @@ xfs_calc_itruncate_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
MAX((mp->m_sb.sb_inodesize +
|
||||
XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1) +
|
||||
128 * (2 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK))),
|
||||
(4 * mp->m_sb.sb_sectsize +
|
||||
4 * mp->m_sb.sb_sectsize +
|
||||
mp->m_sb.sb_sectsize +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 4) +
|
||||
128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4)) +
|
||||
128 * 5 +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 1) +
|
||||
128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels +
|
||||
XFS_ALLOCFREE_LOG_COUNT(mp, 1))));
|
||||
MAX((xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1,
|
||||
XFS_FSB_TO_B(mp, 1))),
|
||||
(xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 4),
|
||||
XFS_FSB_TO_B(mp, 1)) +
|
||||
xfs_calc_buf_res(5, 0) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
|
||||
XFS_FSB_TO_B(mp, 1)) +
|
||||
xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
|
||||
mp->m_in_maxlevels, 0)));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -148,14 +175,12 @@ xfs_calc_rename_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
MAX((4 * mp->m_sb.sb_inodesize +
|
||||
2 * XFS_DIROP_LOG_RES(mp) +
|
||||
128 * (4 + 2 * XFS_DIROP_LOG_COUNT(mp))),
|
||||
(3 * mp->m_sb.sb_sectsize +
|
||||
3 * mp->m_sb.sb_sectsize +
|
||||
mp->m_sb.sb_sectsize +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 3) +
|
||||
128 * (7 + XFS_ALLOCFREE_LOG_COUNT(mp, 3))));
|
||||
MAX((xfs_calc_buf_res(4, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp),
|
||||
XFS_FSB_TO_B(mp, 1))),
|
||||
(xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 3),
|
||||
XFS_FSB_TO_B(mp, 1))));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -175,15 +200,12 @@ xfs_calc_link_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
MAX((mp->m_sb.sb_inodesize +
|
||||
mp->m_sb.sb_inodesize +
|
||||
XFS_DIROP_LOG_RES(mp) +
|
||||
128 * (2 + XFS_DIROP_LOG_COUNT(mp))),
|
||||
(mp->m_sb.sb_sectsize +
|
||||
mp->m_sb.sb_sectsize +
|
||||
mp->m_sb.sb_sectsize +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 1) +
|
||||
128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1))));
|
||||
MAX((xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
|
||||
XFS_FSB_TO_B(mp, 1))),
|
||||
(xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
|
||||
XFS_FSB_TO_B(mp, 1))));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -203,15 +225,12 @@ xfs_calc_remove_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
MAX((mp->m_sb.sb_inodesize +
|
||||
mp->m_sb.sb_inodesize +
|
||||
XFS_DIROP_LOG_RES(mp) +
|
||||
128 * (2 + XFS_DIROP_LOG_COUNT(mp))),
|
||||
(2 * mp->m_sb.sb_sectsize +
|
||||
2 * mp->m_sb.sb_sectsize +
|
||||
mp->m_sb.sb_sectsize +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 2) +
|
||||
128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))));
|
||||
MAX((xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
|
||||
XFS_FSB_TO_B(mp, 1))),
|
||||
(xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
|
||||
XFS_FSB_TO_B(mp, 1))));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -233,18 +252,18 @@ xfs_calc_symlink_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
MAX((mp->m_sb.sb_inodesize +
|
||||
mp->m_sb.sb_inodesize +
|
||||
XFS_FSB_TO_B(mp, 1) +
|
||||
XFS_DIROP_LOG_RES(mp) +
|
||||
1024 +
|
||||
128 * (4 + XFS_DIROP_LOG_COUNT(mp))),
|
||||
(2 * mp->m_sb.sb_sectsize +
|
||||
XFS_FSB_TO_B(mp, XFS_IALLOC_BLOCKS(mp)) +
|
||||
XFS_FSB_TO_B(mp, mp->m_in_maxlevels) +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 1) +
|
||||
128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels +
|
||||
XFS_ALLOCFREE_LOG_COUNT(mp, 1))));
|
||||
MAX((xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) +
|
||||
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
|
||||
XFS_FSB_TO_B(mp, 1)) +
|
||||
xfs_calc_buf_res(1, 1024)),
|
||||
(xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(XFS_IALLOC_BLOCKS(mp),
|
||||
XFS_FSB_TO_B(mp, 1)) +
|
||||
xfs_calc_buf_res(mp->m_in_maxlevels,
|
||||
XFS_FSB_TO_B(mp, 1)) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
|
||||
XFS_FSB_TO_B(mp, 1))));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -267,18 +286,19 @@ xfs_calc_create_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
MAX((mp->m_sb.sb_inodesize +
|
||||
mp->m_sb.sb_inodesize +
|
||||
MAX((xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
|
||||
(uint)XFS_FSB_TO_B(mp, 1) +
|
||||
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
|
||||
XFS_FSB_TO_B(mp, 1))),
|
||||
(xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
|
||||
mp->m_sb.sb_sectsize +
|
||||
XFS_FSB_TO_B(mp, 1) +
|
||||
XFS_DIROP_LOG_RES(mp) +
|
||||
128 * (3 + XFS_DIROP_LOG_COUNT(mp))),
|
||||
(3 * mp->m_sb.sb_sectsize +
|
||||
XFS_FSB_TO_B(mp, XFS_IALLOC_BLOCKS(mp)) +
|
||||
XFS_FSB_TO_B(mp, mp->m_in_maxlevels) +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 1) +
|
||||
128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels +
|
||||
XFS_ALLOCFREE_LOG_COUNT(mp, 1))));
|
||||
xfs_calc_buf_res(XFS_IALLOC_BLOCKS(mp),
|
||||
XFS_FSB_TO_B(mp, 1)) +
|
||||
xfs_calc_buf_res(mp->m_in_maxlevels,
|
||||
XFS_FSB_TO_B(mp, 1)) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
|
||||
XFS_FSB_TO_B(mp, 1))));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -306,16 +326,16 @@ xfs_calc_ifree_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
mp->m_sb.sb_inodesize +
|
||||
mp->m_sb.sb_sectsize +
|
||||
mp->m_sb.sb_sectsize +
|
||||
XFS_FSB_TO_B(mp, 1) +
|
||||
xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) +
|
||||
MAX((__uint16_t)XFS_FSB_TO_B(mp, 1),
|
||||
XFS_INODE_CLUSTER_SIZE(mp)) +
|
||||
128 * 5 +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 1) +
|
||||
128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels +
|
||||
XFS_ALLOCFREE_LOG_COUNT(mp, 1));
|
||||
xfs_calc_buf_res(1, 0) +
|
||||
xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
|
||||
mp->m_in_maxlevels, 0) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
|
||||
XFS_FSB_TO_B(mp, 1));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -343,9 +363,9 @@ STATIC uint
|
||||
xfs_calc_growdata_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return mp->m_sb.sb_sectsize * 3 +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 1) +
|
||||
128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1));
|
||||
return xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
|
||||
XFS_FSB_TO_B(mp, 1));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -362,12 +382,12 @@ STATIC uint
|
||||
xfs_calc_growrtalloc_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return 2 * mp->m_sb.sb_sectsize +
|
||||
XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) +
|
||||
mp->m_sb.sb_inodesize +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 1) +
|
||||
128 * (3 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) +
|
||||
XFS_ALLOCFREE_LOG_COUNT(mp, 1));
|
||||
return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
|
||||
XFS_FSB_TO_B(mp, 1)) +
|
||||
xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
|
||||
XFS_FSB_TO_B(mp, 1));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -379,7 +399,7 @@ STATIC uint
|
||||
xfs_calc_growrtzero_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return mp->m_sb.sb_blocksize + 128;
|
||||
return xfs_calc_buf_res(1, mp->m_sb.sb_blocksize);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -396,11 +416,10 @@ STATIC uint
|
||||
xfs_calc_growrtfree_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return mp->m_sb.sb_sectsize +
|
||||
2 * mp->m_sb.sb_inodesize +
|
||||
mp->m_sb.sb_blocksize +
|
||||
mp->m_rsumsize +
|
||||
128 * 5;
|
||||
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(1, mp->m_sb.sb_blocksize) +
|
||||
xfs_calc_buf_res(1, mp->m_rsumsize);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -411,7 +430,7 @@ STATIC uint
|
||||
xfs_calc_swrite_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return mp->m_sb.sb_inodesize + 128;
|
||||
return xfs_calc_buf_res(1, mp->m_sb.sb_inodesize);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -421,7 +440,7 @@ xfs_calc_swrite_reservation(
|
||||
STATIC uint
|
||||
xfs_calc_writeid_reservation(xfs_mount_t *mp)
|
||||
{
|
||||
return mp->m_sb.sb_inodesize + 128;
|
||||
return xfs_calc_buf_res(1, mp->m_sb.sb_inodesize);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -437,13 +456,13 @@ xfs_calc_addafork_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
mp->m_sb.sb_inodesize +
|
||||
mp->m_sb.sb_sectsize * 2 +
|
||||
mp->m_dirblksize +
|
||||
XFS_FSB_TO_B(mp, XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1) +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 1) +
|
||||
128 * (4 + XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1 +
|
||||
XFS_ALLOCFREE_LOG_COUNT(mp, 1));
|
||||
xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(1, mp->m_dirblksize) +
|
||||
xfs_calc_buf_res(XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1,
|
||||
XFS_FSB_TO_B(mp, 1)) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
|
||||
XFS_FSB_TO_B(mp, 1));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -461,35 +480,51 @@ STATIC uint
|
||||
xfs_calc_attrinval_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return MAX((mp->m_sb.sb_inodesize +
|
||||
XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
|
||||
128 * (1 + XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK))),
|
||||
(4 * mp->m_sb.sb_sectsize +
|
||||
4 * mp->m_sb.sb_sectsize +
|
||||
mp->m_sb.sb_sectsize +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 4) +
|
||||
128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4))));
|
||||
return MAX((xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
|
||||
XFS_FSB_TO_B(mp, 1))),
|
||||
(xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 4),
|
||||
XFS_FSB_TO_B(mp, 1))));
|
||||
}
|
||||
|
||||
/*
|
||||
* Setting an attribute.
|
||||
* Setting an attribute at mount time.
|
||||
* the inode getting the attribute
|
||||
* the superblock for allocations
|
||||
* the agfs extents are allocated from
|
||||
* the attribute btree * max depth
|
||||
* the inode allocation btree
|
||||
* Since attribute transaction space is dependent on the size of the attribute,
|
||||
* the calculation is done partially at mount time and partially at runtime.
|
||||
* the calculation is done partially at mount time and partially at runtime(see
|
||||
* below).
|
||||
*/
|
||||
STATIC uint
|
||||
xfs_calc_attrset_reservation(
|
||||
xfs_calc_attrsetm_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
mp->m_sb.sb_inodesize +
|
||||
mp->m_sb.sb_sectsize +
|
||||
XFS_FSB_TO_B(mp, XFS_DA_NODE_MAXDEPTH) +
|
||||
128 * (2 + XFS_DA_NODE_MAXDEPTH);
|
||||
xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH, XFS_FSB_TO_B(mp, 1));
|
||||
}
|
||||
|
||||
/*
|
||||
* Setting an attribute at runtime, transaction space unit per block.
|
||||
* the superblock for allocations: sector size
|
||||
* the inode bmap btree could join or split: max depth * block size
|
||||
* Since the runtime attribute transaction space is dependent on the total
|
||||
* blocks needed for the 1st bmap, here we calculate out the space unit for
|
||||
* one block so that the caller could figure out the total space according
|
||||
* to the attibute extent length in blocks by: ext * XFS_ATTRSETRT_LOG_RES(mp).
|
||||
*/
|
||||
STATIC uint
|
||||
xfs_calc_attrsetrt_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
|
||||
XFS_FSB_TO_B(mp, 1));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -508,16 +543,15 @@ xfs_calc_attrrm_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
MAX((mp->m_sb.sb_inodesize +
|
||||
XFS_FSB_TO_B(mp, XFS_DA_NODE_MAXDEPTH) +
|
||||
XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
|
||||
128 * (1 + XFS_DA_NODE_MAXDEPTH +
|
||||
XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK))),
|
||||
(2 * mp->m_sb.sb_sectsize +
|
||||
2 * mp->m_sb.sb_sectsize +
|
||||
mp->m_sb.sb_sectsize +
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 2) +
|
||||
128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))));
|
||||
MAX((xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
|
||||
xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH,
|
||||
XFS_FSB_TO_B(mp, 1)) +
|
||||
(uint)XFS_FSB_TO_B(mp,
|
||||
XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
|
||||
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), 0)),
|
||||
(xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
|
||||
XFS_FSB_TO_B(mp, 1))));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -527,7 +561,78 @@ STATIC uint
|
||||
xfs_calc_clear_agi_bucket_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return mp->m_sb.sb_sectsize + 128;
|
||||
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
|
||||
}
|
||||
|
||||
/*
|
||||
* Clearing the quotaflags in the superblock.
|
||||
* the super block for changing quota flags: sector size
|
||||
*/
|
||||
STATIC uint
|
||||
xfs_calc_qm_sbchange_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
|
||||
}
|
||||
|
||||
/*
|
||||
* Adjusting quota limits.
|
||||
* the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
|
||||
*/
|
||||
STATIC uint
|
||||
xfs_calc_qm_setqlim_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return xfs_calc_buf_res(1, sizeof(struct xfs_disk_dquot));
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocating quota on disk if needed.
|
||||
* the write transaction log space: XFS_WRITE_LOG_RES(mp)
|
||||
* the unit of quota allocation: one system block size
|
||||
*/
|
||||
STATIC uint
|
||||
xfs_calc_qm_dqalloc_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_WRITE_LOG_RES(mp) +
|
||||
xfs_calc_buf_res(1,
|
||||
XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) - 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Turning off quotas.
|
||||
* the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
|
||||
* the superblock for the quota flags: sector size
|
||||
*/
|
||||
STATIC uint
|
||||
xfs_calc_qm_quotaoff_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return sizeof(struct xfs_qoff_logitem) * 2 +
|
||||
xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
|
||||
}
|
||||
|
||||
/*
|
||||
* End of turning off quotas.
|
||||
* the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
|
||||
*/
|
||||
STATIC uint
|
||||
xfs_calc_qm_quotaoff_end_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return sizeof(struct xfs_qoff_logitem) * 2;
|
||||
}
|
||||
|
||||
/*
|
||||
* Syncing the incore super block changes to disk.
|
||||
* the super block to reflect the changes: sector size
|
||||
*/
|
||||
STATIC uint
|
||||
xfs_calc_sb_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
|
||||
}
|
||||
|
||||
/*
@@ -555,12 +660,19 @@ xfs_trans_init(
	resp->tr_writeid = xfs_calc_writeid_reservation(mp);
	resp->tr_addafork = xfs_calc_addafork_reservation(mp);
	resp->tr_attrinval = xfs_calc_attrinval_reservation(mp);
	resp->tr_attrset = xfs_calc_attrset_reservation(mp);
	resp->tr_attrsetm = xfs_calc_attrsetm_reservation(mp);
	resp->tr_attrsetrt = xfs_calc_attrsetrt_reservation(mp);
	resp->tr_attrrm = xfs_calc_attrrm_reservation(mp);
	resp->tr_clearagi = xfs_calc_clear_agi_bucket_reservation(mp);
	resp->tr_growrtalloc = xfs_calc_growrtalloc_reservation(mp);
	resp->tr_growrtzero = xfs_calc_growrtzero_reservation(mp);
	resp->tr_growrtfree = xfs_calc_growrtfree_reservation(mp);
	resp->tr_qm_sbchange = xfs_calc_qm_sbchange_reservation(mp);
	resp->tr_qm_setqlim = xfs_calc_qm_setqlim_reservation(mp);
	resp->tr_qm_dqalloc = xfs_calc_qm_dqalloc_reservation(mp);
	resp->tr_qm_quotaoff = xfs_calc_qm_quotaoff_reservation(mp);
	resp->tr_qm_equotaoff = xfs_calc_qm_quotaoff_end_reservation(mp);
	resp->tr_sb = xfs_calc_sb_reservation(mp);
}

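xfs_trans_init() fills this per-mount reservation table once at mount time; the xfs_trans_reservations structure itself (presumably in xfs_mount.h) is not part of the quoted hunks. An abridged sketch of the members these assignments imply, with most pre-existing fields elided and comments inferred from the calculation routines above:

	typedef struct xfs_trans_reservations {
		uint	tr_write;	/* extent alloc trans */
		uint	tr_itruncate;	/* truncate trans */
		/* ... existing entries elided ... */
		uint	tr_attrsetm;	/* set/create an attribute at mount time */
		uint	tr_attrsetrt;	/* set/create an attribute at runtime */
		uint	tr_qm_sbchange;	/* change quota flags in the superblock */
		uint	tr_qm_setqlim;	/* adjust quota limits */
		uint	tr_qm_dqalloc;	/* allocate quota on disk */
		uint	tr_qm_quotaoff;	/* turn quota off */
		uint	tr_qm_equotaoff;/* end of turn quota off */
		uint	tr_sb;		/* modify superblock */
	} xfs_trans_reservations_t;
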
/*

@@ -252,17 +252,19 @@ struct xfs_log_item_desc {
 * as long as SWRITE logs the entire inode core
 */
#define	XFS_FSYNC_TS_LOG_RES(mp)	((mp)->m_reservations.tr_swrite)
#define	XFS_WRITEID_LOG_RES(mp)	((mp)->m_reservations.tr_swrite)
#define	XFS_WRITEID_LOG_RES(mp)		((mp)->m_reservations.tr_swrite)
#define	XFS_ADDAFORK_LOG_RES(mp)	((mp)->m_reservations.tr_addafork)
#define	XFS_ATTRINVAL_LOG_RES(mp)	((mp)->m_reservations.tr_attrinval)
#define	XFS_ATTRSET_LOG_RES(mp, ext)	\
	((mp)->m_reservations.tr_attrset + \
	 (ext * (mp)->m_sb.sb_sectsize) + \
	 (ext * XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK))) + \
	 (128 * (ext + (ext * XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)))))
#define	XFS_ATTRRM_LOG_RES(mp)	((mp)->m_reservations.tr_attrrm)
#define	XFS_ATTRSETM_LOG_RES(mp)	((mp)->m_reservations.tr_attrsetm)
#define	XFS_ATTRSETRT_LOG_RES(mp)	((mp)->m_reservations.tr_attrsetrt)
#define	XFS_ATTRRM_LOG_RES(mp)		((mp)->m_reservations.tr_attrrm)
#define	XFS_CLEAR_AGI_BUCKET_LOG_RES(mp)  ((mp)->m_reservations.tr_clearagi)

#define XFS_QM_SBCHANGE_LOG_RES(mp)	((mp)->m_reservations.tr_qm_sbchange)
#define XFS_QM_SETQLIM_LOG_RES(mp)	((mp)->m_reservations.tr_qm_setqlim)
#define XFS_QM_DQALLOC_LOG_RES(mp)	((mp)->m_reservations.tr_qm_dqalloc)
#define XFS_QM_QUOTAOFF_LOG_RES(mp)	((mp)->m_reservations.tr_qm_quotaoff)
#define XFS_QM_QUOTAOFF_END_LOG_RES(mp)	((mp)->m_reservations.tr_qm_equotaoff)
#define XFS_SB_LOG_RES(mp)		((mp)->m_reservations.tr_sb)

/*
 * Various log count values.

@@ -55,20 +55,6 @@ xfs_ail_check(
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);


#ifdef XFS_TRANS_DEBUG
	/*
	 * Walk the list checking lsn ordering, and that every entry has the
	 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
	 * when specifically debugging the transaction subsystem.
	 */
	prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (&prev_lip->li_ail != &ailp->xa_ail)
			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
		prev_lip = lip;
	}
#endif /* XFS_TRANS_DEBUG */
}
#else /* !DEBUG */
#define	xfs_ail_check(a,l)

@@ -516,7 +516,7 @@ xfs_trans_unreserve_and_mod_dquots(
	int			i, j;
	xfs_dquot_t		*dqp;
	xfs_dqtrx_t		*qtrx, *qa;
	boolean_t		locked;
	bool			locked;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;
@@ -537,17 +537,17 @@ xfs_trans_unreserve_and_mod_dquots(
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			locked = B_FALSE;
			locked = false;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = B_TRUE;
				locked = true;
				dqp->q_res_bcount -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
					locked = true;
				}
				dqp->q_res_icount -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
@@ -556,7 +556,7 @@ xfs_trans_unreserve_and_mod_dquots(
			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
					locked = true;
				}
				dqp->q_res_rtbcount -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;

@@ -33,14 +33,6 @@
#include "xfs_inode_item.h"
#include "xfs_trace.h"

#ifdef XFS_TRANS_DEBUG
STATIC void
xfs_trans_inode_broot_debug(
	xfs_inode_t	*ip);
#else
#define	xfs_trans_inode_broot_debug(ip)
#endif

/*
 * Add a locked inode to the transaction.
 *
@@ -67,8 +59,6 @@ xfs_trans_ijoin(
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &iip->ili_item);

	xfs_trans_inode_broot_debug(ip);
}

/*
@@ -135,34 +125,3 @@ xfs_trans_log_inode(
	flags |= ip->i_itemp->ili_last_fields;
	ip->i_itemp->ili_fields |= flags;
}

#ifdef XFS_TRANS_DEBUG
/*
 * Keep track of the state of the inode btree root to make sure we
 * log it properly.
 */
STATIC void
xfs_trans_inode_broot_debug(
	xfs_inode_t	*ip)
{
	xfs_inode_log_item_t	*iip;

	ASSERT(ip->i_itemp != NULL);
	iip = ip->i_itemp;
	if (iip->ili_root_size != 0) {
		ASSERT(iip->ili_orig_root != NULL);
		kmem_free(iip->ili_orig_root);
		iip->ili_root_size = 0;
		iip->ili_orig_root = NULL;
	}
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		ASSERT((ip->i_df.if_broot != NULL) &&
		       (ip->i_df.if_broot_bytes > 0));
		iip->ili_root_size = ip->i_df.if_broot_bytes;
		iip->ili_orig_root =
			(char*)kmem_alloc(iip->ili_root_size, KM_SLEEP);
		memcpy(iip->ili_orig_root, (char*)(ip->i_df.if_broot),
		       iip->ili_root_size);
	}
}
#endif

@@ -32,7 +32,6 @@ typedef unsigned int		__uint32_t;
typedef signed long long int	__int64_t;
typedef unsigned long long int	__uint64_t;

typedef enum { B_FALSE, B_TRUE }	boolean_t;
typedef __uint32_t		prid_t;		/* project ID */
typedef __uint32_t		inst_t;		/* an instruction */

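With the XFS-private boolean_t typedef gone, callers switch to the kernel-wide bool/true/false, as the xfs_create() and xfs_symlink() hunks below show. The unlock_dp_on_error flag in those functions exists so the directory inode can be unlocked on the error path; a small sketch of that pattern (the function name and error value are illustrative, not taken from this diff):

	STATIC int
	xfs_unlock_on_error_sketch(
		struct xfs_inode	*dp)
	{
		bool		unlock_dp_on_error = false;
		int		error;

		xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
		unlock_dp_on_error = true;

		error = ENOSPC;		/* pretend a later step failed */
		if (error)
			goto out_unlock;

		/* once the inode joins the transaction, the flag is cleared */
		unlock_dp_on_error = false;
		return 0;

	 out_unlock:
		if (unlock_dp_on_error)
			xfs_iunlock(dp, XFS_ILOCK_EXCL);
		return error;
	}
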
@@ -725,7 +725,7 @@ xfs_create(
	int			error;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	boolean_t		unlock_dp_on_error = B_FALSE;
	bool			unlock_dp_on_error = false;
	uint			cancel_flags;
	int			committed;
	prid_t			prid;
@@ -794,7 +794,7 @@ xfs_create(
	}

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = B_TRUE;
	unlock_dp_on_error = true;

	xfs_bmap_init(&free_list, &first_block);

@@ -830,7 +830,7 @@ xfs_create(
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = B_FALSE;
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
					&first_block, &free_list, resblks ?
@@ -1367,7 +1367,7 @@ xfs_symlink(
	int			pathlen;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	boolean_t		unlock_dp_on_error = B_FALSE;
	bool			unlock_dp_on_error = false;
	uint			cancel_flags;
	int			committed;
	xfs_fileoff_t		first_fsb;
@@ -1438,7 +1438,7 @@ xfs_symlink(
	}

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = B_TRUE;
	unlock_dp_on_error = true;

	/*
	 * Check whether the directory allows new symlinks or not.
@@ -1484,7 +1484,7 @@ xfs_symlink(
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = B_FALSE;
	unlock_dp_on_error = false;

	/*
	 * Also attach the dquot(s) to it, if applicable.