2018-06-06 02:42:14 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2006-06-09 04:48:12 +00:00
|
|
|
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
|
2018-07-12 05:26:06 +00:00
|
|
|
* Copyright (c) 2016-2018 Christoph Hellwig.
|
2005-11-02 03:58:39 +00:00
|
|
|
* All Rights Reserved.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2016-06-20 23:52:47 +00:00
|
|
|
#include <linux/iomap.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "xfs.h"
|
|
|
|
#include "xfs_fs.h"
|
2013-10-22 23:36:05 +00:00
|
|
|
#include "xfs_shared.h"
|
2013-10-22 23:50:10 +00:00
|
|
|
#include "xfs_format.h"
|
|
|
|
#include "xfs_log_format.h"
|
|
|
|
#include "xfs_trans_resv.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "xfs_mount.h"
|
2016-08-03 01:15:38 +00:00
|
|
|
#include "xfs_defer.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "xfs_inode.h"
|
2005-11-02 03:38:42 +00:00
|
|
|
#include "xfs_btree.h"
|
2013-10-22 23:51:50 +00:00
|
|
|
#include "xfs_bmap_btree.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "xfs_bmap.h"
|
2013-08-12 10:49:42 +00:00
|
|
|
#include "xfs_bmap_util.h"
|
2017-10-31 19:04:49 +00:00
|
|
|
#include "xfs_errortag.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "xfs_error.h"
|
2013-10-22 23:51:50 +00:00
|
|
|
#include "xfs_trans.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "xfs_trans_space.h"
|
2017-11-01 15:36:47 +00:00
|
|
|
#include "xfs_inode_item.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "xfs_iomap.h"
|
2009-12-14 23:14:59 +00:00
|
|
|
#include "xfs_trace.h"
|
2012-11-06 14:50:38 +00:00
|
|
|
#include "xfs_icache.h"
|
2013-10-22 23:51:50 +00:00
|
|
|
#include "xfs_quota.h"
|
2013-03-18 14:51:47 +00:00
|
|
|
#include "xfs_dquot_item.h"
|
|
|
|
#include "xfs_dquot.h"
|
2016-10-03 16:11:33 +00:00
|
|
|
#include "xfs_reflink.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
|
|
|
|
/*
 * Round a byte offset @off down to the mount's preferred write I/O
 * alignment (a power-of-two of (1 << m_writeio_log) bytes).
 *
 * Note: (mp) is parenthesized so the macro stays correct if a caller
 * ever passes a non-trivial expression for the mount pointer.
 */
#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> (mp)->m_writeio_log) \
					 << (mp)->m_writeio_log)
|
|
|
|
|
2016-09-19 01:09:12 +00:00
|
|
|
void
|
|
|
|
xfs_bmbt_to_iomap(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct iomap *iomap,
|
|
|
|
struct xfs_bmbt_irec *imap)
|
|
|
|
{
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
|
|
|
|
if (imap->br_startblock == HOLESTARTBLOCK) {
|
2017-10-01 21:55:54 +00:00
|
|
|
iomap->addr = IOMAP_NULL_ADDR;
|
2016-09-19 01:09:12 +00:00
|
|
|
iomap->type = IOMAP_HOLE;
|
|
|
|
} else if (imap->br_startblock == DELAYSTARTBLOCK) {
|
2017-10-01 21:55:54 +00:00
|
|
|
iomap->addr = IOMAP_NULL_ADDR;
|
2016-09-19 01:09:12 +00:00
|
|
|
iomap->type = IOMAP_DELALLOC;
|
|
|
|
} else {
|
2017-10-01 21:55:54 +00:00
|
|
|
iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
|
2016-09-19 01:09:12 +00:00
|
|
|
if (imap->br_state == XFS_EXT_UNWRITTEN)
|
|
|
|
iomap->type = IOMAP_UNWRITTEN;
|
|
|
|
else
|
|
|
|
iomap->type = IOMAP_MAPPED;
|
|
|
|
}
|
|
|
|
iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
|
|
|
|
iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
|
|
|
|
iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
|
2017-08-24 22:12:50 +00:00
|
|
|
iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
|
2016-09-19 01:09:12 +00:00
|
|
|
}
|
|
|
|
|
2018-10-18 06:19:26 +00:00
|
|
|
static void
|
|
|
|
xfs_hole_to_iomap(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct iomap *iomap,
|
|
|
|
xfs_fileoff_t offset_fsb,
|
|
|
|
xfs_fileoff_t end_fsb)
|
|
|
|
{
|
|
|
|
iomap->addr = IOMAP_NULL_ADDR;
|
|
|
|
iomap->type = IOMAP_HOLE;
|
|
|
|
iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
|
|
|
|
iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
|
|
|
|
iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
|
|
|
|
iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
|
|
|
|
}
|
|
|
|
|
2016-10-03 16:11:43 +00:00
|
|
|
xfs_extlen_t
|
2016-09-19 01:09:28 +00:00
|
|
|
xfs_eof_alignment(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
xfs_extlen_t extsize)
|
2006-01-11 04:28:28 +00:00
|
|
|
{
|
2016-09-19 01:09:28 +00:00
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
xfs_extlen_t align = 0;
|
2006-01-11 04:28:28 +00:00
|
|
|
|
2011-12-18 20:00:05 +00:00
|
|
|
if (!XFS_IS_REALTIME_INODE(ip)) {
|
|
|
|
/*
|
|
|
|
* Round up the allocation request to a stripe unit
|
|
|
|
* (m_dalign) boundary if the file size is >= stripe unit
|
|
|
|
* size, and we are allocating past the allocation eof.
|
|
|
|
*
|
|
|
|
* If mounted with the "-o swalloc" option the alignment is
|
|
|
|
* increased from the strip unit size to the stripe width.
|
|
|
|
*/
|
|
|
|
if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
|
|
|
|
align = mp->m_swidth;
|
|
|
|
else if (mp->m_dalign)
|
|
|
|
align = mp->m_dalign;
|
|
|
|
|
2014-12-03 22:30:51 +00:00
|
|
|
if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
|
|
|
|
align = 0;
|
2011-12-18 20:00:05 +00:00
|
|
|
}
|
2006-01-11 04:28:28 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Always round up the allocation request to an extent boundary
|
|
|
|
* (when file on a real-time subvolume or has di_extsize hint).
|
|
|
|
*/
|
|
|
|
if (extsize) {
|
2014-12-03 22:30:51 +00:00
|
|
|
if (align)
|
|
|
|
align = roundup_64(align, extsize);
|
2006-01-11 04:28:28 +00:00
|
|
|
else
|
|
|
|
align = extsize;
|
|
|
|
}
|
|
|
|
|
2016-09-19 01:09:28 +00:00
|
|
|
return align;
|
|
|
|
}
|
|
|
|
|
|
|
|
STATIC int
|
|
|
|
xfs_iomap_eof_align_last_fsb(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
xfs_extlen_t extsize,
|
|
|
|
xfs_fileoff_t *last_fsb)
|
|
|
|
{
|
|
|
|
xfs_extlen_t align = xfs_eof_alignment(ip, extsize);
|
|
|
|
|
2014-12-03 22:30:51 +00:00
|
|
|
if (align) {
|
|
|
|
xfs_fileoff_t new_last_fsb = roundup_64(*last_fsb, align);
|
2016-09-19 01:09:28 +00:00
|
|
|
int eof, error;
|
|
|
|
|
2007-10-11 07:34:33 +00:00
|
|
|
error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
|
2006-01-11 04:28:28 +00:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
if (eof)
|
|
|
|
*last_fsb = new_last_fsb;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-09-28 01:03:20 +00:00
|
|
|
STATIC int
|
2011-03-06 23:06:35 +00:00
|
|
|
xfs_alert_fsblock_zero(
|
2006-09-28 01:03:20 +00:00
|
|
|
xfs_inode_t *ip,
|
|
|
|
xfs_bmbt_irec_t *imap)
|
|
|
|
{
|
2011-03-06 23:02:35 +00:00
|
|
|
xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
|
2006-09-28 01:03:20 +00:00
|
|
|
"Access to block zero in inode %llu "
|
|
|
|
"start_block: %llx start_off: %llx "
|
2013-10-12 01:59:05 +00:00
|
|
|
"blkcnt: %llx extent-state: %x",
|
2006-09-28 01:03:20 +00:00
|
|
|
(unsigned long long)ip->i_ino,
|
|
|
|
(unsigned long long)imap->br_startblock,
|
|
|
|
(unsigned long long)imap->br_startoff,
|
|
|
|
(unsigned long long)imap->br_blockcount,
|
|
|
|
imap->br_state);
|
2014-06-25 04:58:08 +00:00
|
|
|
return -EFSCORRUPTED;
|
2006-09-28 01:03:20 +00:00
|
|
|
}
|
|
|
|
|
2010-12-10 08:42:20 +00:00
|
|
|
int
|
2005-04-16 22:20:36 +00:00
|
|
|
xfs_iomap_write_direct(
|
|
|
|
xfs_inode_t *ip,
|
2005-05-05 20:33:40 +00:00
|
|
|
xfs_off_t offset,
|
2005-04-16 22:20:36 +00:00
|
|
|
size_t count,
|
2010-06-24 01:42:19 +00:00
|
|
|
xfs_bmbt_irec_t *imap,
|
2010-12-10 08:42:19 +00:00
|
|
|
int nmaps)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
xfs_mount_t *mp = ip->i_mount;
|
|
|
|
xfs_fileoff_t offset_fsb;
|
|
|
|
xfs_fileoff_t last_fsb;
|
2006-01-11 04:28:28 +00:00
|
|
|
xfs_filblks_t count_fsb, resaligned;
|
2017-02-06 18:42:26 +00:00
|
|
|
xfs_extlen_t extsz;
|
2005-11-02 04:00:01 +00:00
|
|
|
int nimaps;
|
2005-06-21 05:48:47 +00:00
|
|
|
int quota_flag;
|
2005-04-16 22:20:36 +00:00
|
|
|
int rt;
|
|
|
|
xfs_trans_t *tp;
|
2006-01-11 04:28:28 +00:00
|
|
|
uint qblocks, resblks, resrtextents;
|
|
|
|
int error;
|
2015-10-12 04:34:20 +00:00
|
|
|
int lockmode;
|
xfs: Don't use unwritten extents for DAX
DAX has a page fault serialisation problem with block allocation.
Because it allows concurrent page faults and does not have a page
lock to serialise faults to the same page, it can get two concurrent
faults to the page that race.
When two read faults race, this isn't a huge problem as the data
underlying the page is not changing and so "detect and drop" works
just fine. The issues are to do with write faults.
When two write faults occur, we serialise block allocation in
get_blocks() so only one faul will allocate the extent. It will,
however, be marked as an unwritten extent, and that is where the
problem lies - the DAX fault code cannot differentiate between a
block that was just allocated and a block that was preallocated and
needs zeroing. The result is that both write faults end up zeroing
the block and attempting to convert it back to written.
The problem is that the first fault can zero and convert before the
second fault starts zeroing, resulting in the zeroing for the second
fault overwriting the data that the first fault wrote with zeros.
The second fault then attempts to convert the unwritten extent,
which is then a no-op because it's already written. Data loss occurs
as a result of this race.
Because there is no sane locking construct in the page fault code
that we can use for serialisation across the page faults, we need to
ensure block allocation and zeroing occurs atomically in the
filesystem. This means we can still take concurrent page faults and
the only time they will serialise is in the filesystem
mapping/allocation callback. The page fault code will always see
written, initialised extents, so we will be able to remove the
unwritten extent handling from the DAX code when all filesystems are
converted.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2015-11-03 01:37:00 +00:00
|
|
|
int bmapi_flags = XFS_BMAPI_PREALLOC;
|
2016-04-05 23:19:55 +00:00
|
|
|
uint tflags = 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-01-11 04:28:28 +00:00
|
|
|
rt = XFS_IS_REALTIME_INODE(ip);
|
2007-06-18 06:50:37 +00:00
|
|
|
extsz = xfs_get_extsz_hint(ip);
|
2015-10-12 04:34:20 +00:00
|
|
|
lockmode = XFS_ILOCK_SHARED; /* locked by caller */
|
|
|
|
|
|
|
|
ASSERT(xfs_isilocked(ip, lockmode));
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-06-18 06:50:37 +00:00
|
|
|
offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
|
|
|
last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
|
2011-12-18 20:00:11 +00:00
|
|
|
if ((offset + count) > XFS_ISIZE(ip)) {
|
2015-10-12 04:34:20 +00:00
|
|
|
/*
|
|
|
|
* Assert that the in-core extent list is present since this can
|
|
|
|
* call xfs_iread_extents() and we only have the ilock shared.
|
|
|
|
* This should be safe because the lock was held around a bmapi
|
|
|
|
* call in the caller and we only need it to access the in-core
|
|
|
|
* list.
|
|
|
|
*/
|
|
|
|
ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
|
|
|
|
XFS_IFEXTENTS);
|
2016-09-19 01:09:28 +00:00
|
|
|
error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
|
2006-01-11 04:28:28 +00:00
|
|
|
if (error)
|
2015-10-12 04:34:20 +00:00
|
|
|
goto out_unlock;
|
2005-04-16 22:20:36 +00:00
|
|
|
} else {
|
2010-12-10 08:42:19 +00:00
|
|
|
if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
|
2018-06-07 14:54:02 +00:00
|
|
|
last_fsb = min(last_fsb, (xfs_fileoff_t)
|
2010-06-24 01:42:19 +00:00
|
|
|
imap->br_blockcount +
|
|
|
|
imap->br_startoff);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-01-11 04:28:28 +00:00
|
|
|
count_fsb = last_fsb - offset_fsb;
|
|
|
|
ASSERT(count_fsb > 0);
|
2017-02-06 18:42:26 +00:00
|
|
|
resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);
|
2006-01-11 04:28:28 +00:00
|
|
|
|
|
|
|
if (unlikely(rt)) {
|
|
|
|
resrtextents = qblocks = resaligned;
|
|
|
|
resrtextents /= mp->m_sb.sb_rextsize;
|
2007-06-18 06:50:27 +00:00
|
|
|
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
|
|
|
|
quota_flag = XFS_QMOPT_RES_RTBLKS;
|
|
|
|
} else {
|
|
|
|
resrtextents = 0;
|
2006-01-11 04:28:28 +00:00
|
|
|
resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
|
2007-06-18 06:50:27 +00:00
|
|
|
quota_flag = XFS_QMOPT_RES_REGBLKS;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-10-12 04:34:20 +00:00
|
|
|
/*
|
|
|
|
* Drop the shared lock acquired by the caller, attach the dquot if
|
|
|
|
* necessary and move on to transaction setup.
|
|
|
|
*/
|
|
|
|
xfs_iunlock(ip, lockmode);
|
2018-05-04 22:30:21 +00:00
|
|
|
error = xfs_qm_dqattach(ip);
|
2015-10-12 04:34:20 +00:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
xfs: Don't use unwritten extents for DAX
DAX has a page fault serialisation problem with block allocation.
Because it allows concurrent page faults and does not have a page
lock to serialise faults to the same page, it can get two concurrent
faults to the page that race.
When two read faults race, this isn't a huge problem as the data
underlying the page is not changing and so "detect and drop" works
just fine. The issues are to do with write faults.
When two write faults occur, we serialise block allocation in
get_blocks() so only one faul will allocate the extent. It will,
however, be marked as an unwritten extent, and that is where the
problem lies - the DAX fault code cannot differentiate between a
block that was just allocated and a block that was preallocated and
needs zeroing. The result is that both write faults end up zeroing
the block and attempting to convert it back to written.
The problem is that the first fault can zero and convert before the
second fault starts zeroing, resulting in the zeroing for the second
fault overwriting the data that the first fault wrote with zeros.
The second fault then attempts to convert the unwritten extent,
which is then a no-op because it's already written. Data loss occurs
as a result of this race.
Because there is no sane locking construct in the page fault code
that we can use for serialisation across the page faults, we need to
ensure block allocation and zeroing occurs atomically in the
filesystem. This means we can still take concurrent page faults and
the only time they will serialise is in the filesystem
mapping/allocation callback. The page fault code will always see
written, initialised extents, so we will be able to remove the
unwritten extent handling from the DAX code when all filesystems are
converted.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2015-11-03 01:37:00 +00:00
|
|
|
/*
|
|
|
|
* For DAX, we do not allocate unwritten extents, but instead we zero
|
|
|
|
* the block before we commit the transaction. Ideally we'd like to do
|
|
|
|
* this outside the transaction context, but if we commit and then crash
|
|
|
|
* we may not have zeroed the blocks and this will be exposed on
|
|
|
|
* recovery of the allocation. Hence we must zero before commit.
|
2016-01-04 05:22:45 +00:00
|
|
|
*
|
xfs: Don't use unwritten extents for DAX
DAX has a page fault serialisation problem with block allocation.
Because it allows concurrent page faults and does not have a page
lock to serialise faults to the same page, it can get two concurrent
faults to the page that race.
When two read faults race, this isn't a huge problem as the data
underlying the page is not changing and so "detect and drop" works
just fine. The issues are to do with write faults.
When two write faults occur, we serialise block allocation in
get_blocks() so only one faul will allocate the extent. It will,
however, be marked as an unwritten extent, and that is where the
problem lies - the DAX fault code cannot differentiate between a
block that was just allocated and a block that was preallocated and
needs zeroing. The result is that both write faults end up zeroing
the block and attempting to convert it back to written.
The problem is that the first fault can zero and convert before the
second fault starts zeroing, resulting in the zeroing for the second
fault overwriting the data that the first fault wrote with zeros.
The second fault then attempts to convert the unwritten extent,
which is then a no-op because it's already written. Data loss occurs
as a result of this race.
Because there is no sane locking construct in the page fault code
that we can use for serialisation across the page faults, we need to
ensure block allocation and zeroing occurs atomically in the
filesystem. This means we can still take concurrent page faults and
the only time they will serialise is in the filesystem
mapping/allocation callback. The page fault code will always see
written, initialised extents, so we will be able to remove the
unwritten extent handling from the DAX code when all filesystems are
converted.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2015-11-03 01:37:00 +00:00
|
|
|
* Further, if we are mapping unwritten extents here, we need to zero
|
|
|
|
* and convert them to written so that we don't need an unwritten extent
|
|
|
|
* callback for DAX. This also means that we need to be able to dip into
|
2016-01-04 05:22:45 +00:00
|
|
|
* the reserve block pool for bmbt block allocation if there is no space
|
|
|
|
* left but we need to do unwritten extent conversion.
|
xfs: Don't use unwritten extents for DAX
DAX has a page fault serialisation problem with block allocation.
Because it allows concurrent page faults and does not have a page
lock to serialise faults to the same page, it can get two concurrent
faults to the page that race.
When two read faults race, this isn't a huge problem as the data
underlying the page is not changing and so "detect and drop" works
just fine. The issues are to do with write faults.
When two write faults occur, we serialise block allocation in
get_blocks() so only one faul will allocate the extent. It will,
however, be marked as an unwritten extent, and that is where the
problem lies - the DAX fault code cannot differentiate between a
block that was just allocated and a block that was preallocated and
needs zeroing. The result is that both write faults end up zeroing
the block and attempting to convert it back to written.
The problem is that the first fault can zero and convert before the
second fault starts zeroing, resulting in the zeroing for the second
fault overwriting the data that the first fault wrote with zeros.
The second fault then attempts to convert the unwritten extent,
which is then a no-op because it's already written. Data loss occurs
as a result of this race.
Because there is no sane locking construct in the page fault code
that we can use for serialisation across the page faults, we need to
ensure block allocation and zeroing occurs atomically in the
filesystem. This means we can still take concurrent page faults and
the only time they will serialise is in the filesystem
mapping/allocation callback. The page fault code will always see
written, initialised extents, so we will be able to remove the
unwritten extent handling from the DAX code when all filesystems are
converted.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2015-11-03 01:37:00 +00:00
|
|
|
*/
|
|
|
|
if (IS_DAX(VFS_I(ip))) {
|
|
|
|
bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
|
2017-03-28 21:53:36 +00:00
|
|
|
if (imap->br_state == XFS_EXT_UNWRITTEN) {
|
2016-04-05 23:19:55 +00:00
|
|
|
tflags |= XFS_TRANS_RESERVE;
|
2016-01-04 05:22:45 +00:00
|
|
|
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
|
|
|
|
}
|
xfs: Don't use unwritten extents for DAX
DAX has a page fault serialisation problem with block allocation.
Because it allows concurrent page faults and does not have a page
lock to serialise faults to the same page, it can get two concurrent
faults to the page that race.
When two read faults race, this isn't a huge problem as the data
underlying the page is not changing and so "detect and drop" works
just fine. The issues are to do with write faults.
When two write faults occur, we serialise block allocation in
get_blocks() so only one faul will allocate the extent. It will,
however, be marked as an unwritten extent, and that is where the
problem lies - the DAX fault code cannot differentiate between a
block that was just allocated and a block that was preallocated and
needs zeroing. The result is that both write faults end up zeroing
the block and attempting to convert it back to written.
The problem is that the first fault can zero and convert before the
second fault starts zeroing, resulting in the zeroing for the second
fault overwriting the data that the first fault wrote with zeros.
The second fault then attempts to convert the unwritten extent,
which is then a no-op because it's already written. Data loss occurs
as a result of this race.
Because there is no sane locking construct in the page fault code
that we can use for serialisation across the page faults, we need to
ensure block allocation and zeroing occurs atomically in the
filesystem. This means we can still take concurrent page faults and
the only time they will serialise is in the filesystem
mapping/allocation callback. The page fault code will always see
written, initialised extents, so we will be able to remove the
unwritten extent handling from the DAX code when all filesystems are
converted.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2015-11-03 01:37:00 +00:00
|
|
|
}
|
2016-04-05 23:19:55 +00:00
|
|
|
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
|
|
|
|
tflags, &tp);
|
|
|
|
if (error)
|
2014-06-22 05:04:54 +00:00
|
|
|
return error;
|
2012-03-27 14:34:50 +00:00
|
|
|
|
2015-10-12 04:34:20 +00:00
|
|
|
lockmode = XFS_ILOCK_EXCL;
|
|
|
|
xfs_ilock(ip, lockmode);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-06-08 13:33:32 +00:00
|
|
|
error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
|
2006-01-11 04:28:28 +00:00
|
|
|
if (error)
|
2012-03-27 14:34:50 +00:00
|
|
|
goto out_trans_cancel;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-09-19 15:00:54 +00:00
|
|
|
xfs_trans_ijoin(tp, ip, 0);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
2010-06-24 01:42:19 +00:00
|
|
|
* From this point onwards we overwrite the imap pointer that the
|
|
|
|
* caller gave to us.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2005-06-21 05:48:47 +00:00
|
|
|
nimaps = 1;
|
2014-02-09 23:27:43 +00:00
|
|
|
error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
|
2018-07-12 05:26:25 +00:00
|
|
|
bmapi_flags, resblks, imap, &nimaps);
|
2005-06-21 05:48:47 +00:00
|
|
|
if (error)
|
2018-07-24 20:43:13 +00:00
|
|
|
goto out_res_cancel;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
2005-06-21 05:48:47 +00:00
|
|
|
* Complete the transaction
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2015-06-04 03:48:08 +00:00
|
|
|
error = xfs_trans_commit(tp);
|
2005-06-21 05:48:47 +00:00
|
|
|
if (error)
|
2012-03-27 14:34:50 +00:00
|
|
|
goto out_unlock;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-06-21 05:48:47 +00:00
|
|
|
/*
|
|
|
|
* Copy any maps to caller's array and return any error.
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
if (nimaps == 0) {
|
2014-06-25 04:58:08 +00:00
|
|
|
error = -ENOSPC;
|
2012-03-27 14:34:50 +00:00
|
|
|
goto out_unlock;
|
2006-09-28 01:03:20 +00:00
|
|
|
}
|
|
|
|
|
2012-03-27 14:34:50 +00:00
|
|
|
if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
|
2011-03-06 23:06:35 +00:00
|
|
|
error = xfs_alert_fsblock_zero(ip, imap);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2012-03-27 14:34:50 +00:00
|
|
|
out_unlock:
|
2015-10-12 04:34:20 +00:00
|
|
|
xfs_iunlock(ip, lockmode);
|
2012-03-27 14:34:50 +00:00
|
|
|
return error;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2018-07-24 20:43:13 +00:00
|
|
|
out_res_cancel:
|
2012-05-08 10:48:53 +00:00
|
|
|
xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
|
2012-03-27 14:34:50 +00:00
|
|
|
out_trans_cancel:
|
2015-06-04 03:47:56 +00:00
|
|
|
xfs_trans_cancel(tp);
|
2012-03-27 14:34:50 +00:00
|
|
|
goto out_unlock;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2013-03-18 14:51:47 +00:00
|
|
|
STATIC bool
|
|
|
|
xfs_quota_need_throttle(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
int type,
|
|
|
|
xfs_fsblock_t alloc_blocks)
|
|
|
|
{
|
|
|
|
struct xfs_dquot *dq = xfs_inode_dquot(ip, type);
|
|
|
|
|
|
|
|
if (!dq || !xfs_this_quota_on(ip->i_mount, type))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* no hi watermark, no throttle */
|
|
|
|
if (!dq->q_prealloc_hi_wmark)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* under the lo watermark, no throttle */
|
|
|
|
if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
STATIC void
|
|
|
|
xfs_quota_calc_throttle(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
int type,
|
|
|
|
xfs_fsblock_t *qblocks,
|
2014-07-24 09:56:08 +00:00
|
|
|
int *qshift,
|
|
|
|
int64_t *qfreesp)
|
2013-03-18 14:51:47 +00:00
|
|
|
{
|
|
|
|
int64_t freesp;
|
|
|
|
int shift = 0;
|
|
|
|
struct xfs_dquot *dq = xfs_inode_dquot(ip, type);
|
|
|
|
|
2014-10-01 23:27:09 +00:00
|
|
|
/* no dq, or over hi wmark, squash the prealloc completely */
|
|
|
|
if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
|
2013-03-18 14:51:47 +00:00
|
|
|
*qblocks = 0;
|
2014-07-24 09:56:08 +00:00
|
|
|
*qfreesp = 0;
|
2013-03-18 14:51:47 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
|
|
|
|
if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
|
|
|
|
shift = 2;
|
|
|
|
if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
|
|
|
|
shift += 2;
|
|
|
|
if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
|
|
|
|
shift += 2;
|
|
|
|
}
|
|
|
|
|
2014-07-24 09:56:08 +00:00
|
|
|
if (freesp < *qfreesp)
|
|
|
|
*qfreesp = freesp;
|
|
|
|
|
2013-03-18 14:51:47 +00:00
|
|
|
/* only overwrite the throttle values if we are more aggressive */
|
|
|
|
if ((freesp >> shift) < (*qblocks >> *qshift)) {
|
|
|
|
*qblocks = freesp;
|
|
|
|
*qshift = shift;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-01-04 00:35:03 +00:00
|
|
|
/*
|
2016-09-19 01:10:21 +00:00
|
|
|
* If we are doing a write at the end of the file and there are no allocations
|
|
|
|
* past this one, then extend the allocation out to the file system's write
|
|
|
|
* iosize.
|
|
|
|
*
|
2011-01-04 00:35:03 +00:00
|
|
|
* If we don't have a user specified preallocation size, dynamically increase
|
2016-09-19 01:10:21 +00:00
|
|
|
* the preallocation size as the size of the file grows. Cap the maximum size
|
2011-01-04 00:35:03 +00:00
|
|
|
* at a single extent or less if the filesystem is near full. The closer the
|
|
|
|
* filesystem is to full, the smaller the maximum prealocation.
|
2016-09-19 01:10:21 +00:00
|
|
|
*
|
|
|
|
* As an exception we don't do any preallocation at all if the file is smaller
|
|
|
|
* than the minimum preallocation and we are using the default dynamic
|
|
|
|
* preallocation scheme, as it is likely this is the only write to the file that
|
|
|
|
* is going to be done.
|
|
|
|
*
|
|
|
|
* We clean up any extra space left over when the file is closed in
|
|
|
|
* xfs_inactive().
|
2011-01-04 00:35:03 +00:00
|
|
|
*/
|
|
|
|
STATIC xfs_fsblock_t
|
|
|
|
xfs_iomap_prealloc_size(
|
2013-02-11 05:05:01 +00:00
|
|
|
struct xfs_inode *ip,
|
2016-09-19 01:10:21 +00:00
|
|
|
loff_t offset,
|
|
|
|
loff_t count,
|
2017-11-03 17:34:43 +00:00
|
|
|
struct xfs_iext_cursor *icur)
|
2011-01-04 00:35:03 +00:00
|
|
|
{
|
2016-09-19 01:10:21 +00:00
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
2016-11-24 00:39:44 +00:00
|
|
|
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
|
2016-09-19 01:10:21 +00:00
|
|
|
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
2016-11-24 00:39:44 +00:00
|
|
|
struct xfs_bmbt_irec prev;
|
2013-03-18 14:51:43 +00:00
|
|
|
int shift = 0;
|
|
|
|
int64_t freesp;
|
2013-03-18 14:51:47 +00:00
|
|
|
xfs_fsblock_t qblocks;
|
|
|
|
int qshift = 0;
|
2016-09-19 01:10:21 +00:00
|
|
|
xfs_fsblock_t alloc_blocks = 0;
|
|
|
|
|
|
|
|
if (offset + count <= XFS_ISIZE(ip))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
|
|
|
|
(XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If an explicit allocsize is set, the file is small, or we
|
|
|
|
* are writing behind a hole, then use the minimum prealloc:
|
|
|
|
*/
|
|
|
|
if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
|
|
|
|
XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
|
2017-11-03 17:34:43 +00:00
|
|
|
!xfs_iext_peek_prev_extent(ifp, icur, &prev) ||
|
2016-11-24 00:39:44 +00:00
|
|
|
prev.br_startoff + prev.br_blockcount < offset_fsb)
|
2016-09-19 01:10:21 +00:00
|
|
|
return mp->m_writeio_blocks;
|
2011-01-04 00:35:03 +00:00
|
|
|
|
2016-09-19 01:10:21 +00:00
|
|
|
/*
|
|
|
|
* Determine the initial size of the preallocation. We are beyond the
|
|
|
|
* current EOF here, but we need to take into account whether this is
|
|
|
|
* a sparse write or an extending write when determining the
|
|
|
|
* preallocation size. Hence we need to look up the extent that ends
|
|
|
|
* at the current write offset and use the result to determine the
|
|
|
|
* preallocation size.
|
|
|
|
*
|
|
|
|
* If the extent is a hole, then preallocation is essentially disabled.
|
|
|
|
* Otherwise we take the size of the preceding data extent as the basis
|
|
|
|
* for the preallocation size. If the size of the extent is greater than
|
|
|
|
* half the maximum extent length, then use the current offset as the
|
|
|
|
* basis. This ensures that for large files the preallocation size
|
|
|
|
* always extends to MAXEXTLEN rather than falling short due to things
|
|
|
|
* like stripe unit/width alignment of real extents.
|
|
|
|
*/
|
2016-11-24 00:39:44 +00:00
|
|
|
if (prev.br_blockcount <= (MAXEXTLEN >> 1))
|
|
|
|
alloc_blocks = prev.br_blockcount << 1;
|
2016-09-19 01:10:21 +00:00
|
|
|
else
|
|
|
|
alloc_blocks = XFS_B_TO_FSB(mp, offset);
|
2013-03-18 14:51:43 +00:00
|
|
|
if (!alloc_blocks)
|
|
|
|
goto check_writeio;
|
2013-03-18 14:51:47 +00:00
|
|
|
qblocks = alloc_blocks;
|
2013-03-18 14:51:43 +00:00
|
|
|
|
2013-03-18 14:51:44 +00:00
|
|
|
/*
|
|
|
|
* MAXEXTLEN is not a power of two value but we round the prealloc down
|
|
|
|
* to the nearest power of two value after throttling. To prevent the
|
|
|
|
* round down from unconditionally reducing the maximum supported prealloc
|
|
|
|
* size, we round up first, apply appropriate throttling, round down and
|
|
|
|
* cap the value to MAXEXTLEN.
|
|
|
|
*/
|
|
|
|
alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
|
|
|
|
alloc_blocks);
|
2013-03-18 14:51:43 +00:00
|
|
|
|
2015-02-23 10:22:03 +00:00
|
|
|
freesp = percpu_counter_read_positive(&mp->m_fdblocks);
|
2013-03-18 14:51:43 +00:00
|
|
|
if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
|
|
|
|
shift = 2;
|
|
|
|
if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
|
|
|
|
shift++;
|
|
|
|
if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
|
|
|
|
shift++;
|
|
|
|
if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
|
|
|
|
shift++;
|
|
|
|
if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
|
|
|
|
shift++;
|
2011-01-04 00:35:03 +00:00
|
|
|
}
|
2013-03-18 14:51:47 +00:00
|
|
|
|
|
|
|
/*
|
2014-07-24 09:56:08 +00:00
|
|
|
* Check each quota to cap the prealloc size, provide a shift value to
|
|
|
|
* throttle with and adjust amount of available space.
|
2013-03-18 14:51:47 +00:00
|
|
|
*/
|
|
|
|
if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
|
2014-07-24 09:56:08 +00:00
|
|
|
xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
|
|
|
|
&freesp);
|
2013-03-18 14:51:47 +00:00
|
|
|
if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
|
2014-07-24 09:56:08 +00:00
|
|
|
xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
|
|
|
|
&freesp);
|
2013-03-18 14:51:47 +00:00
|
|
|
if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
|
2014-07-24 09:56:08 +00:00
|
|
|
xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
|
|
|
|
&freesp);
|
2013-03-18 14:51:47 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The final prealloc size is set to the minimum of free space available
|
|
|
|
* in each of the quotas and the overall filesystem.
|
|
|
|
*
|
|
|
|
* The shift throttle value is set to the maximum value as determined by
|
|
|
|
* the global low free space values and per-quota low free space values.
|
|
|
|
*/
|
2018-06-07 14:54:02 +00:00
|
|
|
alloc_blocks = min(alloc_blocks, qblocks);
|
|
|
|
shift = max(shift, qshift);
|
2013-03-18 14:51:47 +00:00
|
|
|
|
2013-03-18 14:51:43 +00:00
|
|
|
if (shift)
|
|
|
|
alloc_blocks >>= shift;
|
2013-03-18 14:51:44 +00:00
|
|
|
/*
|
|
|
|
* rounddown_pow_of_two() returns an undefined result if we pass in
|
|
|
|
* alloc_blocks = 0.
|
|
|
|
*/
|
|
|
|
if (alloc_blocks)
|
|
|
|
alloc_blocks = rounddown_pow_of_two(alloc_blocks);
|
|
|
|
if (alloc_blocks > MAXEXTLEN)
|
|
|
|
alloc_blocks = MAXEXTLEN;
|
2013-03-18 14:51:43 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we are still trying to allocate more space than is
|
|
|
|
* available, squash the prealloc hard. This can happen if we
|
|
|
|
* have a large file on a small filesystem and the above
|
|
|
|
* lowspace thresholds are smaller than MAXEXTLEN.
|
|
|
|
*/
|
|
|
|
while (alloc_blocks && alloc_blocks >= freesp)
|
|
|
|
alloc_blocks >>= 4;
|
|
|
|
check_writeio:
|
2011-01-04 00:35:03 +00:00
|
|
|
if (alloc_blocks < mp->m_writeio_blocks)
|
|
|
|
alloc_blocks = mp->m_writeio_blocks;
|
2013-03-18 14:51:48 +00:00
|
|
|
trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
|
|
|
|
mp->m_writeio_blocks);
|
2011-01-04 00:35:03 +00:00
|
|
|
return alloc_blocks;
|
|
|
|
}
|
|
|
|
|
2016-09-19 01:10:21 +00:00
|
|
|
static int
|
|
|
|
xfs_file_iomap_begin_delay(
|
|
|
|
struct inode *inode,
|
|
|
|
loff_t offset,
|
|
|
|
loff_t count,
|
2018-10-18 06:19:26 +00:00
|
|
|
unsigned flags,
|
2016-09-19 01:10:21 +00:00
|
|
|
struct iomap *iomap)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2016-09-19 01:10:21 +00:00
|
|
|
struct xfs_inode *ip = XFS_I(inode);
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
|
|
|
|
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
|
|
|
xfs_fileoff_t maxbytes_fsb =
|
|
|
|
XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
|
2016-11-28 03:57:42 +00:00
|
|
|
xfs_fileoff_t end_fsb;
|
2016-09-19 01:10:21 +00:00
|
|
|
int error = 0, eof = 0;
|
|
|
|
struct xfs_bmbt_irec got;
|
2017-11-03 17:34:43 +00:00
|
|
|
struct xfs_iext_cursor icur;
|
2016-11-28 03:57:42 +00:00
|
|
|
xfs_fsblock_t prealloc_blocks = 0;
|
2016-09-19 01:10:21 +00:00
|
|
|
|
|
|
|
ASSERT(!XFS_IS_REALTIME_INODE(ip));
|
|
|
|
ASSERT(!xfs_get_extsz_hint(ip));
|
2006-01-11 04:28:28 +00:00
|
|
|
|
2016-09-19 01:10:21 +00:00
|
|
|
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2016-09-19 01:10:21 +00:00
|
|
|
if (unlikely(XFS_TEST_ERROR(
|
|
|
|
(XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
|
|
|
|
XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
|
2017-06-21 00:54:47 +00:00
|
|
|
mp, XFS_ERRTAG_BMAPIFORMAT))) {
|
2016-09-19 01:10:21 +00:00
|
|
|
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
|
|
|
|
error = -EFSCORRUPTED;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
2013-02-11 05:05:01 +00:00
|
|
|
|
2016-09-19 01:10:21 +00:00
|
|
|
XFS_STATS_INC(mp, xs_blk_mapw);
|
2011-01-04 00:35:03 +00:00
|
|
|
|
2016-09-19 01:10:21 +00:00
|
|
|
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
|
|
|
error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2018-10-18 06:19:26 +00:00
|
|
|
end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
|
|
|
|
|
2017-11-03 17:34:43 +00:00
|
|
|
eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got);
|
2018-10-18 06:19:26 +00:00
|
|
|
if (eof)
|
|
|
|
got.br_startoff = end_fsb; /* fake hole until the end */
|
|
|
|
|
|
|
|
if (got.br_startoff <= offset_fsb) {
|
|
|
|
/*
|
|
|
|
* For reflink files we may need a delalloc reservation when
|
|
|
|
* overwriting shared extents. This includes zeroing of
|
|
|
|
* existing extents that contain data.
|
|
|
|
*/
|
|
|
|
if (xfs_is_reflink_inode(ip) &&
|
|
|
|
((flags & IOMAP_WRITE) ||
|
|
|
|
got.br_state != XFS_EXT_UNWRITTEN)) {
|
2016-10-20 04:53:50 +00:00
|
|
|
xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
|
2018-10-18 06:19:37 +00:00
|
|
|
error = xfs_reflink_reserve_cow(ip, &got);
|
2016-10-20 04:53:50 +00:00
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
2016-09-19 01:10:21 +00:00
|
|
|
trace_xfs_iomap_found(ip, offset, count, 0, &got);
|
|
|
|
goto done;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-01-11 04:28:28 +00:00
|
|
|
|
2018-10-18 06:19:26 +00:00
|
|
|
if (flags & IOMAP_ZERO) {
|
|
|
|
xfs_hole_to_iomap(ip, iomap, offset_fsb, got.br_startoff);
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
2018-05-04 22:30:22 +00:00
|
|
|
error = xfs_qm_dqattach_locked(ip, false);
|
2016-09-19 01:10:21 +00:00
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
|
2012-04-29 12:43:19 +00:00
|
|
|
/*
|
2016-09-19 01:10:21 +00:00
|
|
|
* We cap the maximum length we map here to MAX_WRITEBACK_PAGES pages
|
|
|
|
* to keep the chunks of work done where somewhat symmetric with the
|
|
|
|
* work writeback does. This is a completely arbitrary number pulled
|
|
|
|
* out of thin air as a best guess for initial testing.
|
|
|
|
*
|
|
|
|
* Note that the values needs to be less than 32-bits wide until
|
|
|
|
* the lower level functions are updated.
|
2012-04-29 12:43:19 +00:00
|
|
|
*/
|
2016-09-19 01:10:21 +00:00
|
|
|
count = min_t(loff_t, count, 1024 * PAGE_SIZE);
|
2016-11-28 03:57:42 +00:00
|
|
|
end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
|
2016-09-19 01:10:21 +00:00
|
|
|
|
|
|
|
if (eof) {
|
2017-11-03 17:34:43 +00:00
|
|
|
prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count,
|
|
|
|
&icur);
|
2016-09-19 01:10:21 +00:00
|
|
|
if (prealloc_blocks) {
|
|
|
|
xfs_extlen_t align;
|
|
|
|
xfs_off_t end_offset;
|
2016-11-28 03:57:42 +00:00
|
|
|
xfs_fileoff_t p_end_fsb;
|
2012-04-29 12:43:19 +00:00
|
|
|
|
2016-09-19 01:10:21 +00:00
|
|
|
end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
|
2016-11-28 03:57:42 +00:00
|
|
|
p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
|
|
|
|
prealloc_blocks;
|
2016-09-19 01:10:21 +00:00
|
|
|
|
|
|
|
align = xfs_eof_alignment(ip, 0);
|
|
|
|
if (align)
|
2016-11-28 03:57:42 +00:00
|
|
|
p_end_fsb = roundup_64(p_end_fsb, align);
|
2016-09-19 01:10:21 +00:00
|
|
|
|
2016-11-28 03:57:42 +00:00
|
|
|
p_end_fsb = min(p_end_fsb, maxbytes_fsb);
|
|
|
|
ASSERT(p_end_fsb > offset_fsb);
|
|
|
|
prealloc_blocks = p_end_fsb - end_fsb;
|
2016-09-19 01:10:21 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
retry:
|
2016-10-03 16:11:32 +00:00
|
|
|
error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
|
2017-11-03 17:34:43 +00:00
|
|
|
end_fsb - offset_fsb, prealloc_blocks, &got, &icur,
|
|
|
|
eof);
|
2011-01-04 00:35:03 +00:00
|
|
|
switch (error) {
|
|
|
|
case 0:
|
2016-09-19 01:10:21 +00:00
|
|
|
break;
|
2014-06-25 04:58:08 +00:00
|
|
|
case -ENOSPC:
|
|
|
|
case -EDQUOT:
|
2016-09-19 01:10:21 +00:00
|
|
|
/* retry without any preallocation */
|
2009-12-14 23:14:59 +00:00
|
|
|
trace_xfs_delalloc_enospc(ip, offset, count);
|
2016-11-28 03:57:42 +00:00
|
|
|
if (prealloc_blocks) {
|
|
|
|
prealloc_blocks = 0;
|
2012-10-08 10:56:04 +00:00
|
|
|
goto retry;
|
2011-01-04 00:35:03 +00:00
|
|
|
}
|
2016-09-19 01:10:21 +00:00
|
|
|
/*FALLTHRU*/
|
|
|
|
default:
|
|
|
|
goto out_unlock;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2017-03-08 17:58:08 +00:00
|
|
|
/*
|
|
|
|
* Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
|
|
|
|
* them out if the write happens to fail.
|
|
|
|
*/
|
2018-06-19 22:10:58 +00:00
|
|
|
iomap->flags |= IOMAP_F_NEW;
|
2016-09-19 01:10:21 +00:00
|
|
|
trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
|
|
|
|
done:
|
|
|
|
if (isnullstartblock(got.br_startblock))
|
|
|
|
got.br_startblock = DELAYSTARTBLOCK;
|
|
|
|
|
|
|
|
if (!got.br_startblock) {
|
|
|
|
error = xfs_alert_fsblock_zero(ip, &got);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
xfs_bmbt_to_iomap(ip, iomap, &got);
|
|
|
|
|
|
|
|
out_unlock:
|
|
|
|
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
|
|
|
return error;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Pass in a delayed allocate extent, convert it to real extents;
|
|
|
|
* return to the caller the extent we create which maps on top of
|
|
|
|
* the originating callers request.
|
|
|
|
*
|
|
|
|
* Called without a lock on the inode.
|
2007-11-23 05:29:11 +00:00
|
|
|
*
|
|
|
|
* We no longer bother to look at the incoming map - all we have to
|
|
|
|
* guarantee is that whatever we allocate fills the required range.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2010-12-10 08:42:20 +00:00
|
|
|
int
|
2005-04-16 22:20:36 +00:00
|
|
|
xfs_iomap_write_allocate(
|
|
|
|
xfs_inode_t *ip,
|
2016-10-03 16:11:34 +00:00
|
|
|
int whichfork,
|
2005-05-05 20:33:40 +00:00
|
|
|
xfs_off_t offset,
|
2018-07-17 23:51:52 +00:00
|
|
|
xfs_bmbt_irec_t *imap,
|
|
|
|
unsigned int *cow_seq)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
xfs_mount_t *mp = ip->i_mount;
|
2018-08-07 17:57:12 +00:00
|
|
|
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
|
2005-04-16 22:20:36 +00:00
|
|
|
xfs_fileoff_t offset_fsb, last_block;
|
|
|
|
xfs_fileoff_t end_fsb, map_start_fsb;
|
|
|
|
xfs_filblks_t count_fsb;
|
|
|
|
xfs_trans_t *tp;
|
xfs: eliminate committed arg from xfs_bmap_finish
Calls to xfs_bmap_finish() and xfs_trans_ijoin(), and the
associated comments were replicated several times across
the attribute code, all dealing with what to do if the
transaction was or wasn't committed.
And in that replicated code, an ASSERT() test of an
uninitialized variable occurs in several locations:
error = xfs_attr_thing(&args);
if (!error) {
error = xfs_bmap_finish(&args.trans, args.flist,
&committed);
}
if (error) {
ASSERT(committed);
If the first xfs_attr_thing() failed, we'd skip the xfs_bmap_finish,
never set "committed", and then test it in the ASSERT.
Fix this up by moving the committed state internal to xfs_bmap_finish,
and add a new inode argument. If an inode is passed in, it is passed
through to __xfs_trans_roll() and joined to the transaction there if
the transaction was committed.
xfs_qm_dqalloc() was a little unique in that it called bjoin rather
than ijoin, but as Dave points out we can detect the committed state
but checking whether (*tpp != tp).
Addresses-Coverity-Id: 102360
Addresses-Coverity-Id: 102361
Addresses-Coverity-Id: 102363
Addresses-Coverity-Id: 102364
Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2016-01-11 00:34:01 +00:00
|
|
|
int nimaps;
|
2005-04-16 22:20:36 +00:00
|
|
|
int error = 0;
|
2017-01-20 17:31:54 +00:00
|
|
|
int flags = XFS_BMAPI_DELALLOC;
|
2005-04-16 22:20:36 +00:00
|
|
|
int nres;
|
|
|
|
|
2016-10-03 16:11:34 +00:00
|
|
|
if (whichfork == XFS_COW_FORK)
|
2017-02-02 23:14:02 +00:00
|
|
|
flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;
|
2016-10-03 16:11:34 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Make sure that the dquots are there.
|
|
|
|
*/
|
2018-05-04 22:30:21 +00:00
|
|
|
error = xfs_qm_dqattach(ip);
|
2009-06-08 13:33:32 +00:00
|
|
|
if (error)
|
2014-06-22 05:04:54 +00:00
|
|
|
return error;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-05-05 20:33:20 +00:00
|
|
|
offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
2010-06-24 01:42:19 +00:00
|
|
|
count_fsb = imap->br_blockcount;
|
|
|
|
map_start_fsb = imap->br_startoff;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-10-12 07:21:22 +00:00
|
|
|
XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
while (count_fsb != 0) {
|
|
|
|
/*
|
|
|
|
* Set up a transaction with which to allocate the
|
|
|
|
* backing store for the file. Do allocations in a
|
|
|
|
* loop until we get some space in the range we are
|
|
|
|
* interested in. The other space that might be allocated
|
|
|
|
* is in the delayed allocation extent on which we sit
|
|
|
|
* but before our buffer starts.
|
|
|
|
*/
|
|
|
|
nimaps = 0;
|
|
|
|
while (nimaps == 0) {
|
|
|
|
nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
|
2016-08-16 22:30:28 +00:00
|
|
|
/*
|
|
|
|
* We have already reserved space for the extent and any
|
|
|
|
* indirect blocks when creating the delalloc extent,
|
|
|
|
* there is no need to reserve space in this transaction
|
|
|
|
* again.
|
|
|
|
*/
|
|
|
|
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
|
2016-04-05 23:19:55 +00:00
|
|
|
0, XFS_TRANS_RESERVE, &tp);
|
|
|
|
if (error)
|
2014-06-22 05:04:54 +00:00
|
|
|
return error;
|
2016-04-05 23:19:55 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
2011-09-19 15:00:54 +00:00
|
|
|
xfs_trans_ijoin(tp, ip, 0);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
2007-11-23 05:29:11 +00:00
|
|
|
* it is possible that the extents have changed since
|
|
|
|
* we did the read call as we dropped the ilock for a
|
|
|
|
* while. We have to be careful about truncates or hole
|
|
|
|
* punchs here - we are not allowed to allocate
|
|
|
|
* non-delalloc blocks here.
|
|
|
|
*
|
|
|
|
* The only protection against truncation is the pages
|
|
|
|
* for the range we are being asked to convert are
|
|
|
|
* locked and hence a truncate will block on them
|
|
|
|
* first.
|
|
|
|
*
|
|
|
|
* As a result, if we go beyond the range we really
|
|
|
|
* need and hit an delalloc extent boundary followed by
|
|
|
|
* a hole while we have excess blocks in the map, we
|
|
|
|
* will fill the hole incorrectly and overrun the
|
|
|
|
* transaction reservation.
|
|
|
|
*
|
|
|
|
* Using a single map prevents this as we are forced to
|
|
|
|
* check each map we look for overlap with the desired
|
|
|
|
* range and abort as soon as we find it. Also, given
|
|
|
|
* that we only return a single map, having one beyond
|
|
|
|
* what we can return is probably a bit silly.
|
|
|
|
*
|
|
|
|
* We also need to check that we don't go beyond EOF;
|
|
|
|
* this is a truncate optimisation as a truncate sets
|
|
|
|
* the new file size before block on the pages we
|
|
|
|
* currently have locked under writeback. Because they
|
|
|
|
* are about to be tossed, we don't need to write them
|
|
|
|
* back....
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2007-11-23 05:29:11 +00:00
|
|
|
nimaps = 1;
|
2011-12-18 20:00:11 +00:00
|
|
|
end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
|
2014-04-14 08:58:05 +00:00
|
|
|
error = xfs_bmap_last_offset(ip, &last_block,
|
2008-04-10 02:21:59 +00:00
|
|
|
XFS_DATA_FORK);
|
|
|
|
if (error)
|
|
|
|
goto trans_cancel;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
|
|
|
|
if ((map_start_fsb + count_fsb) > last_block) {
|
|
|
|
count_fsb = last_block - map_start_fsb;
|
|
|
|
if (count_fsb == 0) {
|
2014-06-25 04:58:08 +00:00
|
|
|
error = -EAGAIN;
|
2005-04-16 22:20:36 +00:00
|
|
|
goto trans_cancel;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-06-24 01:42:19 +00:00
|
|
|
/*
|
|
|
|
* From this point onwards we overwrite the imap
|
|
|
|
* pointer that the caller gave to us.
|
|
|
|
*/
|
2011-09-18 20:40:52 +00:00
|
|
|
error = xfs_bmapi_write(tp, ip, map_start_fsb,
|
2018-07-12 05:26:25 +00:00
|
|
|
count_fsb, flags, nres, imap,
|
2018-07-12 05:26:23 +00:00
|
|
|
&nimaps);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (error)
|
|
|
|
goto trans_cancel;
|
|
|
|
|
2015-06-04 03:48:08 +00:00
|
|
|
error = xfs_trans_commit(tp);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (error)
|
|
|
|
goto error0;
|
|
|
|
|
2018-07-17 23:51:52 +00:00
|
|
|
if (whichfork == XFS_COW_FORK)
|
2018-08-07 17:57:12 +00:00
|
|
|
*cow_seq = READ_ONCE(ifp->if_seq);
|
2005-04-16 22:20:36 +00:00
|
|
|
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* See if we were able to allocate an extent that
|
|
|
|
* covers at least part of the callers request
|
|
|
|
*/
|
2010-06-24 01:42:19 +00:00
|
|
|
if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
|
2011-03-06 23:06:35 +00:00
|
|
|
return xfs_alert_fsblock_zero(ip, imap);
|
2008-04-29 02:53:21 +00:00
|
|
|
|
2010-06-24 01:42:19 +00:00
|
|
|
if ((offset_fsb >= imap->br_startoff) &&
|
|
|
|
(offset_fsb < (imap->br_startoff +
|
|
|
|
imap->br_blockcount))) {
|
2015-10-12 07:21:22 +00:00
|
|
|
XFS_STATS_INC(mp, xs_xstrat_quick);
|
2007-11-23 05:29:11 +00:00
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2007-11-23 05:29:11 +00:00
|
|
|
/*
|
|
|
|
* So far we have not mapped the requested part of the
|
2005-04-16 22:20:36 +00:00
|
|
|
* file, just surrounding data, try again.
|
|
|
|
*/
|
2010-06-24 01:42:19 +00:00
|
|
|
count_fsb -= imap->br_blockcount;
|
|
|
|
map_start_fsb = imap->br_startoff + imap->br_blockcount;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
trans_cancel:
|
2015-06-04 03:47:56 +00:00
|
|
|
xfs_trans_cancel(tp);
|
2005-04-16 22:20:36 +00:00
|
|
|
error0:
|
|
|
|
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
2014-06-22 05:04:54 +00:00
|
|
|
return error;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
xfs_iomap_write_unwritten(
|
|
|
|
xfs_inode_t *ip,
|
2005-05-05 20:33:40 +00:00
|
|
|
xfs_off_t offset,
|
2017-09-21 18:26:18 +00:00
|
|
|
xfs_off_t count,
|
|
|
|
bool update_isize)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
xfs_mount_t *mp = ip->i_mount;
|
|
|
|
xfs_fileoff_t offset_fsb;
|
|
|
|
xfs_filblks_t count_fsb;
|
|
|
|
xfs_filblks_t numblks_fsb;
|
2006-01-11 04:28:28 +00:00
|
|
|
int nimaps;
|
|
|
|
xfs_trans_t *tp;
|
|
|
|
xfs_bmbt_irec_t imap;
|
2017-09-21 18:26:18 +00:00
|
|
|
struct inode *inode = VFS_I(ip);
|
2012-02-29 09:53:50 +00:00
|
|
|
xfs_fsize_t i_size;
|
2006-01-11 04:28:28 +00:00
|
|
|
uint resblks;
|
2005-04-16 22:20:36 +00:00
|
|
|
int error;
|
|
|
|
|
2009-12-14 23:14:59 +00:00
|
|
|
trace_xfs_unwritten_convert(ip, offset, count);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
|
|
|
count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
|
|
|
|
count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);
|
|
|
|
|
2008-06-27 03:32:53 +00:00
|
|
|
/*
|
|
|
|
* Reserve enough blocks in this transaction for two complete extent
|
|
|
|
* btree splits. We may be converting the middle part of an unwritten
|
|
|
|
* extent and in this case we will insert two new extents in the btree
|
|
|
|
* each of which could cause a full split.
|
|
|
|
*
|
|
|
|
* This reservation amount will be used in the first call to
|
|
|
|
* xfs_bmbt_split() to select an AG with enough space to satisfy the
|
|
|
|
* rest of the operation.
|
|
|
|
*/
|
2006-01-11 04:28:28 +00:00
|
|
|
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-01-11 04:28:28 +00:00
|
|
|
do {
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2016-04-05 23:19:55 +00:00
|
|
|
* Set up a transaction to convert the range of extents
|
2005-04-16 22:20:36 +00:00
|
|
|
* from unwritten to real. Do allocations in a loop until
|
|
|
|
* we have covered the range passed in.
|
2009-10-19 04:00:03 +00:00
|
|
|
*
|
2016-04-05 23:19:55 +00:00
|
|
|
* Note that we can't risk to recursing back into the filesystem
|
|
|
|
* here as we might be asked to write out the same inode that we
|
|
|
|
* complete here and might deadlock on the iolock.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2016-04-05 23:19:55 +00:00
|
|
|
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
|
|
|
|
XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
|
|
|
|
if (error)
|
2014-06-22 05:04:54 +00:00
|
|
|
return error;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
2011-09-19 15:00:54 +00:00
|
|
|
xfs_trans_ijoin(tp, ip, 0);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Modify the unwritten extent state of the buffer.
|
|
|
|
*/
|
|
|
|
nimaps = 1;
|
2011-09-18 20:40:52 +00:00
|
|
|
error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
|
2018-07-12 05:26:25 +00:00
|
|
|
XFS_BMAPI_CONVERT, resblks, &imap,
|
|
|
|
&nimaps);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (error)
|
|
|
|
goto error_on_bmapi_transaction;
|
|
|
|
|
2012-02-29 09:53:50 +00:00
|
|
|
/*
|
|
|
|
* Log the updated inode size as we go. We have to be careful
|
|
|
|
* to only log it up to the actual write offset if it is
|
|
|
|
* halfway into a block.
|
|
|
|
*/
|
|
|
|
i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
|
|
|
|
if (i_size > offset + count)
|
|
|
|
i_size = offset + count;
|
2017-09-21 18:26:18 +00:00
|
|
|
if (update_isize && i_size > i_size_read(inode))
|
|
|
|
i_size_write(inode, i_size);
|
2012-02-29 09:53:50 +00:00
|
|
|
i_size = xfs_new_eof(ip, i_size);
|
|
|
|
if (i_size) {
|
|
|
|
ip->i_d.di_size = i_size;
|
|
|
|
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
|
|
|
}
|
|
|
|
|
2015-06-04 03:48:08 +00:00
|
|
|
error = xfs_trans_commit(tp);
|
2005-04-16 22:20:36 +00:00
|
|
|
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
|
|
|
if (error)
|
2014-06-22 05:04:54 +00:00
|
|
|
return error;
|
2006-09-28 01:03:20 +00:00
|
|
|
|
2008-04-29 02:53:21 +00:00
|
|
|
if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
|
2011-03-06 23:06:35 +00:00
|
|
|
return xfs_alert_fsblock_zero(ip, &imap);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if ((numblks_fsb = imap.br_blockcount) == 0) {
|
|
|
|
/*
|
|
|
|
* The numblks_fsb value should always get
|
|
|
|
* smaller, otherwise the loop is stuck.
|
|
|
|
*/
|
|
|
|
ASSERT(imap.br_blockcount);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
offset_fsb += numblks_fsb;
|
|
|
|
count_fsb -= numblks_fsb;
|
|
|
|
} while (count_fsb > 0);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
error_on_bmapi_transaction:
|
2015-06-04 03:47:56 +00:00
|
|
|
xfs_trans_cancel(tp);
|
2005-04-16 22:20:36 +00:00
|
|
|
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
2014-06-22 05:04:54 +00:00
|
|
|
return error;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2016-06-20 23:52:47 +00:00
|
|
|
|
2018-05-02 19:54:54 +00:00
|
|
|
static inline bool
|
|
|
|
imap_needs_alloc(
|
|
|
|
struct inode *inode,
|
|
|
|
struct xfs_bmbt_irec *imap,
|
|
|
|
int nimaps)
|
2016-06-20 23:53:44 +00:00
|
|
|
{
|
|
|
|
return !nimaps ||
|
|
|
|
imap->br_startblock == HOLESTARTBLOCK ||
|
2016-09-19 01:28:38 +00:00
|
|
|
imap->br_startblock == DELAYSTARTBLOCK ||
|
2017-03-28 21:53:36 +00:00
|
|
|
(IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
|
2016-06-20 23:53:44 +00:00
|
|
|
}
|
|
|
|
|
2018-05-02 19:54:54 +00:00
|
|
|
static inline bool
|
|
|
|
needs_cow_for_zeroing(
|
|
|
|
struct xfs_bmbt_irec *imap,
|
|
|
|
int nimaps)
|
2018-03-01 22:10:31 +00:00
|
|
|
{
|
|
|
|
return nimaps &&
|
|
|
|
imap->br_startblock != HOLESTARTBLOCK &&
|
|
|
|
imap->br_state != XFS_EXT_UNWRITTEN;
|
|
|
|
}
|
|
|
|
|
2018-05-02 19:54:54 +00:00
|
|
|
static int
|
|
|
|
xfs_ilock_for_iomap(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
unsigned flags,
|
|
|
|
unsigned *lockmode)
|
2016-11-30 03:37:15 +00:00
|
|
|
{
|
2018-05-02 19:54:54 +00:00
|
|
|
unsigned mode = XFS_ILOCK_SHARED;
|
2018-06-22 06:26:57 +00:00
|
|
|
bool is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);
|
2018-05-02 19:54:54 +00:00
|
|
|
|
2016-11-30 03:37:15 +00:00
|
|
|
/*
|
2018-03-01 22:12:12 +00:00
|
|
|
* COW writes may allocate delalloc space or convert unwritten COW
|
|
|
|
* extents, so we need to make sure to take the lock exclusively here.
|
2016-11-30 03:37:15 +00:00
|
|
|
*/
|
2018-06-22 06:26:57 +00:00
|
|
|
if (xfs_is_reflink_inode(ip) && is_write) {
|
2018-05-02 19:54:54 +00:00
|
|
|
/*
|
|
|
|
* FIXME: It could still overwrite on unshared extents and not
|
|
|
|
* need allocation.
|
|
|
|
*/
|
|
|
|
if (flags & IOMAP_NOWAIT)
|
|
|
|
return -EAGAIN;
|
|
|
|
mode = XFS_ILOCK_EXCL;
|
|
|
|
}
|
2018-03-01 22:12:45 +00:00
|
|
|
|
|
|
|
/*
|
2018-05-02 19:54:54 +00:00
|
|
|
* Extents not yet cached requires exclusive access, don't block. This
|
|
|
|
* is an opencoded xfs_ilock_data_map_shared() call but with
|
2018-03-01 22:12:45 +00:00
|
|
|
* non-blocking behaviour.
|
|
|
|
*/
|
2018-05-02 19:54:54 +00:00
|
|
|
if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
|
|
|
|
if (flags & IOMAP_NOWAIT)
|
|
|
|
return -EAGAIN;
|
|
|
|
mode = XFS_ILOCK_EXCL;
|
|
|
|
}
|
|
|
|
|
2018-06-22 06:26:57 +00:00
|
|
|
relock:
|
2018-05-02 19:54:54 +00:00
|
|
|
if (flags & IOMAP_NOWAIT) {
|
|
|
|
if (!xfs_ilock_nowait(ip, mode))
|
|
|
|
return -EAGAIN;
|
|
|
|
} else {
|
|
|
|
xfs_ilock(ip, mode);
|
|
|
|
}
|
|
|
|
|
2018-06-22 06:26:57 +00:00
|
|
|
/*
|
|
|
|
* The reflink iflag could have changed since the earlier unlocked
|
|
|
|
* check, so if we got ILOCK_SHARED for a write and but we're now a
|
|
|
|
* reflink inode we have to switch to ILOCK_EXCL and relock.
|
|
|
|
*/
|
|
|
|
if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_reflink_inode(ip)) {
|
|
|
|
xfs_iunlock(ip, mode);
|
|
|
|
mode = XFS_ILOCK_EXCL;
|
|
|
|
goto relock;
|
|
|
|
}
|
|
|
|
|
2018-05-02 19:54:54 +00:00
|
|
|
*lockmode = mode;
|
|
|
|
return 0;
|
2016-11-30 03:37:15 +00:00
|
|
|
}
|
|
|
|
|
2016-06-20 23:53:44 +00:00
|
|
|
static int
|
|
|
|
xfs_file_iomap_begin(
|
|
|
|
struct inode *inode,
|
|
|
|
loff_t offset,
|
|
|
|
loff_t length,
|
|
|
|
unsigned flags,
|
|
|
|
struct iomap *iomap)
|
|
|
|
{
|
|
|
|
struct xfs_inode *ip = XFS_I(inode);
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
struct xfs_bmbt_irec imap;
|
|
|
|
xfs_fileoff_t offset_fsb, end_fsb;
|
|
|
|
int nimaps = 1, error = 0;
|
2018-10-18 06:19:48 +00:00
|
|
|
bool shared = false;
|
2016-09-19 01:26:39 +00:00
|
|
|
unsigned lockmode;
|
2016-06-20 23:53:44 +00:00
|
|
|
|
|
|
|
if (XFS_FORCED_SHUTDOWN(mp))
|
|
|
|
return -EIO;
|
|
|
|
|
2018-10-18 06:19:26 +00:00
|
|
|
if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && !(flags & IOMAP_DIRECT) &&
|
2016-11-30 03:37:15 +00:00
|
|
|
!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
|
2016-10-03 16:11:33 +00:00
|
|
|
/* Reserve delalloc blocks for regular writeback. */
|
2018-10-18 06:19:26 +00:00
|
|
|
return xfs_file_iomap_begin_delay(inode, offset, length, flags,
|
|
|
|
iomap);
|
2016-09-19 01:10:21 +00:00
|
|
|
}
|
|
|
|
|
2018-05-02 19:54:54 +00:00
|
|
|
/*
|
|
|
|
* Lock the inode in the manner required for the specified operation and
|
|
|
|
* check for as many conditions that would result in blocking as
|
|
|
|
* possible. This removes most of the non-blocking checks from the
|
|
|
|
* mapping code below.
|
|
|
|
*/
|
|
|
|
error = xfs_ilock_for_iomap(ip, flags, &lockmode);
|
|
|
|
if (error)
|
|
|
|
return error;
|
2017-06-20 12:05:48 +00:00
|
|
|
|
2016-06-20 23:53:44 +00:00
|
|
|
ASSERT(offset <= mp->m_super->s_maxbytes);
|
2017-12-22 21:14:34 +00:00
|
|
|
if (offset > mp->m_super->s_maxbytes - length)
|
2016-06-20 23:53:44 +00:00
|
|
|
length = mp->m_super->s_maxbytes - offset;
|
|
|
|
offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
|
|
|
end_fsb = XFS_B_TO_FSB(mp, offset + length);
|
|
|
|
|
|
|
|
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
|
2016-10-03 16:11:36 +00:00
|
|
|
&nimaps, 0);
|
2016-10-20 04:53:50 +00:00
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
2016-10-03 16:11:36 +00:00
|
|
|
|
2017-02-06 18:51:03 +00:00
|
|
|
if (flags & IOMAP_REPORT) {
|
2016-10-20 04:53:32 +00:00
|
|
|
/* Trim the mapping to the nearest shared extent boundary. */
|
2018-10-18 06:19:48 +00:00
|
|
|
error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
|
2016-10-20 04:53:50 +00:00
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
2018-05-02 19:54:53 +00:00
|
|
|
/* Non-modifying mapping requested, so we are done */
|
|
|
|
if (!(flags & (IOMAP_WRITE | IOMAP_ZERO)))
|
|
|
|
goto out_found;
|
|
|
|
|
2018-05-02 19:54:54 +00:00
|
|
|
/*
|
|
|
|
* Break shared extents if necessary. Checks for non-blocking IO have
|
|
|
|
* been done up front, so we don't need to do them here.
|
|
|
|
*/
|
|
|
|
if (xfs_is_reflink_inode(ip)) {
|
|
|
|
/* if zeroing doesn't need COW allocation, then we are done. */
|
|
|
|
if ((flags & IOMAP_ZERO) &&
|
|
|
|
!needs_cow_for_zeroing(&imap, nimaps))
|
|
|
|
goto out_found;
|
|
|
|
|
2017-02-06 18:51:03 +00:00
|
|
|
if (flags & IOMAP_DIRECT) {
|
|
|
|
/* may drop and re-acquire the ilock */
|
|
|
|
error = xfs_reflink_allocate_cow(ip, &imap, &shared,
|
|
|
|
&lockmode);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
} else {
|
2018-10-18 06:19:37 +00:00
|
|
|
error = xfs_reflink_reserve_cow(ip, &imap);
|
2017-02-06 18:51:03 +00:00
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
2016-10-20 04:53:50 +00:00
|
|
|
|
|
|
|
end_fsb = imap.br_startoff + imap.br_blockcount;
|
|
|
|
length = XFS_FSB_TO_B(mp, end_fsb) - offset;
|
2016-06-20 23:53:44 +00:00
|
|
|
}
|
|
|
|
|
2018-05-02 19:54:53 +00:00
|
|
|
/* Don't need to allocate over holes when doing zeroing operations. */
|
|
|
|
if (flags & IOMAP_ZERO)
|
|
|
|
goto out_found;
|
2016-06-20 23:53:44 +00:00
|
|
|
|
2018-05-02 19:54:53 +00:00
|
|
|
if (!imap_needs_alloc(inode, &imap, nimaps))
|
|
|
|
goto out_found;
|
2016-08-16 22:44:52 +00:00
|
|
|
|
2018-05-02 19:54:53 +00:00
|
|
|
/* If nowait is set bail since we are going to make allocations. */
|
|
|
|
if (flags & IOMAP_NOWAIT) {
|
|
|
|
error = -EAGAIN;
|
|
|
|
goto out_unlock;
|
2016-06-20 23:53:44 +00:00
|
|
|
}
|
|
|
|
|
2018-05-02 19:54:53 +00:00
|
|
|
/*
|
|
|
|
* We cap the maximum length we map to a sane size to keep the chunks
|
|
|
|
* of work done where somewhat symmetric with the work writeback does.
|
|
|
|
* This is a completely arbitrary number pulled out of thin air as a
|
|
|
|
* best guess for initial testing.
|
|
|
|
*
|
|
|
|
* Note that the values needs to be less than 32-bits wide until the
|
|
|
|
* lower level functions are updated.
|
|
|
|
*/
|
|
|
|
length = min_t(loff_t, length, 1024 * PAGE_SIZE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* xfs_iomap_write_direct() expects the shared lock. It is unlocked on
|
|
|
|
* return.
|
|
|
|
*/
|
|
|
|
if (lockmode == XFS_ILOCK_EXCL)
|
|
|
|
xfs_ilock_demote(ip, lockmode);
|
|
|
|
error = xfs_iomap_write_direct(ip, offset, length, &imap,
|
|
|
|
nimaps);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
2018-06-19 22:10:58 +00:00
|
|
|
iomap->flags |= IOMAP_F_NEW;
|
2018-05-02 19:54:53 +00:00
|
|
|
trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
|
|
|
|
|
|
|
|
out_finish:
|
2017-11-14 00:38:44 +00:00
|
|
|
if (xfs_ipincount(ip) && (ip->i_itemp->ili_fsync_fields
|
|
|
|
& ~XFS_ILOG_TIMESTAMP))
|
2017-11-01 15:36:47 +00:00
|
|
|
iomap->flags |= IOMAP_F_DIRTY;
|
|
|
|
|
2016-08-16 22:44:52 +00:00
|
|
|
xfs_bmbt_to_iomap(ip, iomap, &imap);
|
2017-01-27 20:04:59 +00:00
|
|
|
|
2016-10-03 16:11:36 +00:00
|
|
|
if (shared)
|
|
|
|
iomap->flags |= IOMAP_F_SHARED;
|
2016-06-20 23:53:44 +00:00
|
|
|
return 0;
|
2018-05-02 19:54:53 +00:00
|
|
|
|
|
|
|
out_found:
|
|
|
|
ASSERT(nimaps);
|
|
|
|
xfs_iunlock(ip, lockmode);
|
|
|
|
trace_xfs_iomap_found(ip, offset, length, 0, &imap);
|
|
|
|
goto out_finish;
|
|
|
|
|
2016-10-20 04:53:50 +00:00
|
|
|
out_unlock:
|
|
|
|
xfs_iunlock(ip, lockmode);
|
|
|
|
return error;
|
2016-06-20 23:53:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
xfs_file_iomap_end_delalloc(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
loff_t offset,
|
|
|
|
loff_t length,
|
2017-03-08 17:58:08 +00:00
|
|
|
ssize_t written,
|
|
|
|
struct iomap *iomap)
|
2016-06-20 23:53:44 +00:00
|
|
|
{
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
xfs_fileoff_t start_fsb;
|
|
|
|
xfs_fileoff_t end_fsb;
|
|
|
|
int error = 0;
|
|
|
|
|
2017-03-08 17:58:08 +00:00
|
|
|
/*
|
|
|
|
* Behave as if the write failed if drop writes is enabled. Set the NEW
|
|
|
|
* flag to force delalloc cleanup.
|
|
|
|
*/
|
2017-06-21 00:54:48 +00:00
|
|
|
if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
|
2017-03-08 17:58:08 +00:00
|
|
|
iomap->flags |= IOMAP_F_NEW;
|
2017-02-14 06:48:17 +00:00
|
|
|
written = 0;
|
2017-03-08 17:58:08 +00:00
|
|
|
}
|
2017-02-14 06:48:17 +00:00
|
|
|
|
2017-02-17 01:19:12 +00:00
|
|
|
/*
|
|
|
|
* start_fsb refers to the first unused block after a short write. If
|
|
|
|
* nothing was written, round offset down to point at the first block in
|
|
|
|
* the range.
|
|
|
|
*/
|
|
|
|
if (unlikely(!written))
|
|
|
|
start_fsb = XFS_B_TO_FSBT(mp, offset);
|
|
|
|
else
|
|
|
|
start_fsb = XFS_B_TO_FSB(mp, offset + written);
|
2016-06-20 23:53:44 +00:00
|
|
|
end_fsb = XFS_B_TO_FSB(mp, offset + length);
|
|
|
|
|
|
|
|
/*
|
2017-03-08 17:58:08 +00:00
|
|
|
* Trim delalloc blocks if they were allocated by this write and we
|
|
|
|
* didn't manage to write the whole range.
|
2016-06-20 23:53:44 +00:00
|
|
|
*
|
|
|
|
* We don't need to care about racing delalloc as we hold i_mutex
|
|
|
|
* across the reserve/allocate/unreserve calls. If there are delalloc
|
|
|
|
* blocks in the range, they are ours.
|
|
|
|
*/
|
2017-03-08 17:58:08 +00:00
|
|
|
if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
|
2017-02-17 01:19:12 +00:00
|
|
|
truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
|
|
|
|
XFS_FSB_TO_B(mp, end_fsb) - 1);
|
|
|
|
|
2016-06-20 23:53:44 +00:00
|
|
|
error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
|
|
|
|
end_fsb - start_fsb);
|
|
|
|
if (error && !XFS_FORCED_SHUTDOWN(mp)) {
|
|
|
|
xfs_alert(mp, "%s: unable to clean up ino %lld",
|
|
|
|
__func__, ip->i_ino);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
xfs_file_iomap_end(
|
|
|
|
struct inode *inode,
|
|
|
|
loff_t offset,
|
|
|
|
loff_t length,
|
|
|
|
ssize_t written,
|
|
|
|
unsigned flags,
|
|
|
|
struct iomap *iomap)
|
|
|
|
{
|
|
|
|
if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
|
|
|
|
return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
|
2017-03-08 17:58:08 +00:00
|
|
|
length, written, iomap);
|
2016-06-20 23:53:44 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-01-28 07:20:26 +00:00
|
|
|
/*
 * iomap operations for regular file data.  ->iomap_end is wired up so
 * that short or failed delalloc writes get their unused reservation
 * punched out again (see xfs_file_iomap_end).
 */
const struct iomap_ops xfs_iomap_ops = {
	.iomap_begin		= xfs_file_iomap_begin,
	.iomap_end		= xfs_file_iomap_end,
};
|
2016-08-16 22:45:30 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
xfs_xattr_iomap_begin(
|
|
|
|
struct inode *inode,
|
|
|
|
loff_t offset,
|
|
|
|
loff_t length,
|
|
|
|
unsigned flags,
|
|
|
|
struct iomap *iomap)
|
|
|
|
{
|
|
|
|
struct xfs_inode *ip = XFS_I(inode);
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
|
|
|
xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length);
|
|
|
|
struct xfs_bmbt_irec imap;
|
|
|
|
int nimaps = 1, error = 0;
|
|
|
|
unsigned lockmode;
|
|
|
|
|
|
|
|
if (XFS_FORCED_SHUTDOWN(mp))
|
|
|
|
return -EIO;
|
|
|
|
|
2017-04-06 23:00:39 +00:00
|
|
|
lockmode = xfs_ilock_attr_map_shared(ip);
|
2016-08-16 22:45:30 +00:00
|
|
|
|
|
|
|
/* if there are no attribute fork or extents, return ENOENT */
|
2017-04-06 23:00:39 +00:00
|
|
|
if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
|
2016-08-16 22:45:30 +00:00
|
|
|
error = -ENOENT;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
|
|
|
|
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
|
2017-12-07 00:13:35 +00:00
|
|
|
&nimaps, XFS_BMAPI_ATTRFORK);
|
2016-08-16 22:45:30 +00:00
|
|
|
out_unlock:
|
|
|
|
xfs_iunlock(ip, lockmode);
|
|
|
|
|
|
|
|
if (!error) {
|
|
|
|
ASSERT(nimaps);
|
|
|
|
xfs_bmbt_to_iomap(ip, iomap, &imap);
|
|
|
|
}
|
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2017-01-28 07:20:26 +00:00
|
|
|
/*
 * Lookup-only iomap operations for the attribute fork.  No ->iomap_end
 * is needed: xfs_xattr_iomap_begin drops the inode lock before it
 * returns, so nothing is held across the operation.
 */
const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};
|