Merge branch 'raid56-experimental' into for-linus-3.9
Signed-off-by: Chris Mason <chris.mason@fusionio.com>

Conflicts:
	fs/btrfs/ctree.h
	fs/btrfs/extent-tree.c
	fs/btrfs/inode.c
	fs/btrfs/volumes.c
commit e942f883bc
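This merge pulls the experimental btrfs RAID5/6 work into the 3.9 queue: new RAID5/RAID6 block-group flags and an incompat feature bit, a stripe hash table that serializes read/modify/write on full stripes, dedicated rmw and endio-raid56 worker threads, full-stripe-aware free-space alignment, and the new raid56.c/raid56.h parity mapping code. The per-file hunks follow.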
fs/btrfs/Kconfig

@@ -6,6 +6,9 @@ config BTRFS_FS
 	select ZLIB_DEFLATE
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
+	select RAID6_PQ
+	select XOR_BLOCKS
+
 	help
 	  Btrfs is a new filesystem with extents, writable snapshotting,
 	  support for multiple devices and many more features.
fs/btrfs/Makefile

@@ -8,7 +8,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
 	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
 	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
-	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o
+	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o

 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
fs/btrfs/compression.c

@@ -372,7 +372,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 		page = compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
 		if (bio->bi_size)
-			ret = io_tree->ops->merge_bio_hook(page, 0,
+			ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
 							   PAGE_CACHE_SIZE,
 							   bio, 0);
 		else

@@ -655,7 +655,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		page->index = em_start >> PAGE_CACHE_SHIFT;

 		if (comp_bio->bi_size)
-			ret = tree->ops->merge_bio_hook(page, 0,
+			ret = tree->ops->merge_bio_hook(READ, page, 0,
 							PAGE_CACHE_SIZE,
 							comp_bio, 0);
 		else
fs/btrfs/ctree.h

@@ -506,6 +506,7 @@ struct btrfs_super_block {
 #define BTRFS_FEATURE_INCOMPAT_BIG_METADATA	(1ULL << 5)

 #define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF	(1ULL << 6)
+#define BTRFS_FEATURE_INCOMPAT_RAID56		(1ULL << 7)

 #define BTRFS_FEATURE_COMPAT_SUPP		0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SUPP		0ULL

@@ -515,6 +516,7 @@ struct btrfs_super_block {
 	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
 	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
 	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
+	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
 	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)

 /*

@@ -956,6 +958,8 @@ struct btrfs_dev_replace_item {
 #define BTRFS_BLOCK_GROUP_RAID1		(1ULL << 4)
 #define BTRFS_BLOCK_GROUP_DUP		(1ULL << 5)
 #define BTRFS_BLOCK_GROUP_RAID10	(1ULL << 6)
+#define BTRFS_BLOCK_GROUP_RAID5		(1 << 7)
+#define BTRFS_BLOCK_GROUP_RAID6		(1 << 8)
 #define BTRFS_BLOCK_GROUP_RESERVED	BTRFS_AVAIL_ALLOC_BIT_SINGLE

 enum btrfs_raid_types {

@@ -964,6 +968,8 @@ enum btrfs_raid_types {
 	BTRFS_RAID_DUP,
 	BTRFS_RAID_RAID0,
 	BTRFS_RAID_SINGLE,
+	BTRFS_RAID_RAID5,
+	BTRFS_RAID_RAID6,
 	BTRFS_NR_RAID_TYPES
 };

@@ -973,6 +979,8 @@ enum btrfs_raid_types {

 #define BTRFS_BLOCK_GROUP_PROFILE_MASK	(BTRFS_BLOCK_GROUP_RAID0 |   \
 					 BTRFS_BLOCK_GROUP_RAID1 |   \
+					 BTRFS_BLOCK_GROUP_RAID5 |   \
+					 BTRFS_BLOCK_GROUP_RAID6 |   \
 					 BTRFS_BLOCK_GROUP_DUP |     \
 					 BTRFS_BLOCK_GROUP_RAID10)
 /*

@@ -1197,6 +1205,10 @@ struct btrfs_block_group_cache {
 	u64 flags;
 	u64 sectorsize;
 	u64 cache_generation;
+
+	/* for raid56, this is a full stripe, without parity */
+	unsigned long full_stripe_len;
+
 	unsigned int ro:1;
 	unsigned int dirty:1;
 	unsigned int iref:1;
@@ -1242,6 +1254,23 @@ enum btrfs_orphan_cleanup_state {
 	ORPHAN_CLEANUP_DONE	= 2,
 };

+/* used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash {
+	struct list_head hash_list;
+	wait_queue_head_t wait;
+	spinlock_t lock;
+};
+
+/* used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash_table {
+	struct list_head stripe_cache;
+	spinlock_t cache_lock;
+	int cache_size;
+	struct btrfs_stripe_hash table[];
+};
+
+#define BTRFS_STRIPE_HASH_TABLE_BITS 11
+
 /* fs_info */
 struct reloc_control;
 struct btrfs_device;
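The table above holds 1 << BTRFS_STRIPE_HASH_TABLE_BITS buckets, and two I/Os that land in the same full stripe must find the same bucket so their read/modify/write cycles serialize. A minimal userspace sketch of that sizing and indexing, assuming a hash keyed on the full-stripe number (the helper names here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdlib.h>

#define STRIPE_HASH_TABLE_BITS 11
#define STRIPE_HASH_TABLE_SIZE (1UL << STRIPE_HASH_TABLE_BITS)

struct stripe_hash {
	int dummy;	/* per-bucket state (list head, lock, waitqueue) */
};

struct stripe_hash_table {
	struct stripe_hash table[STRIPE_HASH_TABLE_SIZE];
};

/* bucket index for a full-stripe start offset, one bucket per stripe */
static unsigned long stripe_hash(uint64_t full_stripe_start,
				 uint64_t full_stripe_len)
{
	return (full_stripe_start / full_stripe_len) &
	       (STRIPE_HASH_TABLE_SIZE - 1);
}

int main(void)
{
	struct stripe_hash_table *t = calloc(1, sizeof(*t));
	if (!t)
		return 1;
	/* two I/Os inside the same 192 KiB full stripe share a bucket */
	unsigned long a = stripe_hash(3 * 192 * 1024, 192 * 1024);
	unsigned long b = stripe_hash(3 * 192 * 1024 + 4096, 192 * 1024);
	free(t);
	return a == b ? 0 : 1;
}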
@@ -1341,6 +1370,13 @@ struct btrfs_fs_info {
 	struct mutex cleaner_mutex;
 	struct mutex chunk_mutex;
 	struct mutex volume_mutex;
+
+	/* this is used during read/modify/write to make sure
+	 * no two ios are trying to mod the same stripe at the same
+	 * time
+	 */
+	struct btrfs_stripe_hash_table *stripe_hash_table;
+
 	/*
 	 * this protects the ordered operations list only while we are
 	 * processing all of the entries on it.  This way we make

@@ -1423,6 +1459,8 @@ struct btrfs_fs_info {
 	struct btrfs_workers flush_workers;
 	struct btrfs_workers endio_workers;
 	struct btrfs_workers endio_meta_workers;
+	struct btrfs_workers endio_raid56_workers;
+	struct btrfs_workers rmw_workers;
 	struct btrfs_workers endio_meta_write_workers;
 	struct btrfs_workers endio_write_workers;
 	struct btrfs_workers endio_freespace_worker;

@@ -3490,9 +3528,9 @@ int btrfs_writepages(struct address_space *mapping,
 		     struct writeback_control *wbc);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *new_root, u64 new_dirid);
-int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
-			 size_t size, struct bio *bio, unsigned long bio_flags);
+int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
+			 size_t size, struct bio *bio,
+			 unsigned long bio_flags);
 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
fs/btrfs/delayed-ref.h

@@ -131,6 +131,15 @@ struct btrfs_delayed_ref_root {
 	/* total number of head nodes ready for processing */
 	unsigned long num_heads_ready;

+	/*
+	 * bumped when someone is making progress on the delayed
+	 * refs, so that other procs know they are just adding to
+	 * contention intead of helping
+	 */
+	atomic_t procs_running_refs;
+	atomic_t ref_seq;
+	wait_queue_head_t wait;
+
 	/*
 	 * set when the tree is flushing before a transaction commit,
 	 * used by the throttling code to decide if new updates need
fs/btrfs/disk-io.c

@@ -46,6 +46,7 @@
 #include "check-integrity.h"
 #include "rcu-string.h"
 #include "dev-replace.h"
+#include "raid56.h"

 #ifdef CONFIG_X86
 #include <asm/cpufeature.h>

@@ -640,8 +641,15 @@ err:
 		btree_readahead_hook(root, eb, eb->start, ret);
 	}

-	if (ret)
+	if (ret) {
+		/*
+		 * our io error hook is going to dec the io pages
+		 * again, we have to make sure it has something
+		 * to decrement
+		 */
+		atomic_inc(&eb->io_pages);
 		clear_extent_buffer_uptodate(eb);
+	}
 	free_extent_buffer(eb);
 out:
 	return ret;

@@ -655,6 +663,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
 	eb = (struct extent_buffer *)page->private;
 	set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
 	eb->read_mirror = failed_mirror;
+	atomic_dec(&eb->io_pages);
 	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
 		btree_readahead_hook(root, eb, eb->start, -EIO);
 	return -EIO;	/* we fixed nothing */

@@ -671,17 +680,23 @@ static void end_workqueue_bio(struct bio *bio, int err)
 	end_io_wq->work.flags = 0;

 	if (bio->bi_rw & REQ_WRITE) {
-		if (end_io_wq->metadata == 1)
+		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
 			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
 					   &end_io_wq->work);
-		else if (end_io_wq->metadata == 2)
+		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
 			btrfs_queue_worker(&fs_info->endio_freespace_worker,
 					   &end_io_wq->work);
+		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+			btrfs_queue_worker(&fs_info->endio_raid56_workers,
+					   &end_io_wq->work);
 		else
 			btrfs_queue_worker(&fs_info->endio_write_workers,
 					   &end_io_wq->work);
 	} else {
-		if (end_io_wq->metadata)
+		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+			btrfs_queue_worker(&fs_info->endio_raid56_workers,
+					   &end_io_wq->work);
+		else if (end_io_wq->metadata)
 			btrfs_queue_worker(&fs_info->endio_meta_workers,
 					   &end_io_wq->work);
 		else

@@ -696,6 +711,7 @@ static void end_workqueue_bio(struct bio *bio, int err)
  * 0 - if data
  * 1 - if normal metadta
  * 2 - if writing to the free space cache area
+ * 3 - raid parity work
  */
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
@@ -2179,6 +2195,12 @@ int open_ctree(struct super_block *sb,
 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
 	init_waitqueue_head(&fs_info->async_submit_wait);

+	ret = btrfs_alloc_stripe_hash_table(fs_info);
+	if (ret) {
+		err = -ENOMEM;
+		goto fail_alloc;
+	}
+
 	__setup_root(4096, 4096, 4096, 4096, tree_root,
 		     fs_info, BTRFS_ROOT_TREE_OBJECTID);

@@ -2349,6 +2371,12 @@ int open_ctree(struct super_block *sb,
 	btrfs_init_workers(&fs_info->endio_meta_write_workers,
 			   "endio-meta-write", fs_info->thread_pool_size,
 			   &fs_info->generic_worker);
+	btrfs_init_workers(&fs_info->endio_raid56_workers,
+			   "endio-raid56", fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
+	btrfs_init_workers(&fs_info->rmw_workers,
+			   "rmw", fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
 			   fs_info->thread_pool_size,
 			   &fs_info->generic_worker);

@@ -2367,6 +2395,8 @@ int open_ctree(struct super_block *sb,
 	 */
 	fs_info->endio_workers.idle_thresh = 4;
 	fs_info->endio_meta_workers.idle_thresh = 4;
+	fs_info->endio_raid56_workers.idle_thresh = 4;
+	fs_info->rmw_workers.idle_thresh = 2;

 	fs_info->endio_write_workers.idle_thresh = 2;
 	fs_info->endio_meta_write_workers.idle_thresh = 2;

@@ -2383,6 +2413,8 @@ int open_ctree(struct super_block *sb,
 	ret |= btrfs_start_workers(&fs_info->fixup_workers);
 	ret |= btrfs_start_workers(&fs_info->endio_workers);
 	ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+	ret |= btrfs_start_workers(&fs_info->rmw_workers);
+	ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
 	ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
 	ret |= btrfs_start_workers(&fs_info->endio_write_workers);
 	ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);

@@ -2726,6 +2758,8 @@ fail_sb_buffer:
 	btrfs_stop_workers(&fs_info->workers);
 	btrfs_stop_workers(&fs_info->endio_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_workers);
+	btrfs_stop_workers(&fs_info->endio_raid56_workers);
+	btrfs_stop_workers(&fs_info->rmw_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
 	btrfs_stop_workers(&fs_info->endio_freespace_worker);

@@ -2747,6 +2781,7 @@ fail_bdi:
 fail_srcu:
 	cleanup_srcu_struct(&fs_info->subvol_srcu);
 fail:
+	btrfs_free_stripe_hash_table(fs_info);
 	btrfs_close_devices(fs_info->fs_devices);
 	return err;

@@ -3094,11 +3129,16 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
 				    ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
 				     == 0)))
 					num_tolerated_disk_barrier_failures = 0;
-				else if (num_tolerated_disk_barrier_failures > 1
-					 &&
-					 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
-						   BTRFS_BLOCK_GROUP_RAID10)))
-					num_tolerated_disk_barrier_failures = 1;
+				else if (num_tolerated_disk_barrier_failures > 1) {
+					if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+					    BTRFS_BLOCK_GROUP_RAID5 |
+					    BTRFS_BLOCK_GROUP_RAID10)) {
+						num_tolerated_disk_barrier_failures = 1;
+					} else if (flags &
+						   BTRFS_BLOCK_GROUP_RAID5) {
+						num_tolerated_disk_barrier_failures = 2;
+					}
+				}
 			}
 		}
 	}
 	up_read(&sinfo->groups_sem);

@@ -3402,6 +3442,8 @@ int close_ctree(struct btrfs_root *root)
 	btrfs_stop_workers(&fs_info->workers);
 	btrfs_stop_workers(&fs_info->endio_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_workers);
+	btrfs_stop_workers(&fs_info->endio_raid56_workers);
+	btrfs_stop_workers(&fs_info->rmw_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
 	btrfs_stop_workers(&fs_info->endio_freespace_worker);

@@ -3424,6 +3466,8 @@ int close_ctree(struct btrfs_root *root)
 	bdi_destroy(&fs_info->bdi);
 	cleanup_srcu_struct(&fs_info->subvol_srcu);

+	btrfs_free_stripe_hash_table(fs_info);
+
 	return 0;
 }
fs/btrfs/disk-io.h

@@ -25,6 +25,13 @@
 #define BTRFS_SUPER_MIRROR_MAX	 3
 #define BTRFS_SUPER_MIRROR_SHIFT 12

+enum {
+	BTRFS_WQ_ENDIO_DATA = 0,
+	BTRFS_WQ_ENDIO_METADATA = 1,
+	BTRFS_WQ_ENDIO_FREE_SPACE = 2,
+	BTRFS_WQ_ENDIO_RAID56 = 3,
+};
+
 static inline u64 btrfs_sb_offset(int mirror)
 {
 	u64 start = 16 * 1024;
fs/btrfs/extent-tree.c

@@ -31,6 +31,7 @@
 #include "print-tree.h"
 #include "transaction.h"
 #include "volumes.h"
+#include "raid56.h"
 #include "locking.h"
 #include "free-space-cache.h"
 #include "math.h"

@@ -1852,6 +1853,8 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 		*actual_bytes = discarded_bytes;


+	if (ret == -EOPNOTSUPP)
+		ret = 0;
 	return ret;
 }

@@ -2440,6 +2443,16 @@ int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
 	return ret;
 }

+static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
+		      int count)
+{
+	int val = atomic_read(&delayed_refs->ref_seq);
+
+	if (val < seq || val >= seq + count)
+		return 1;
+	return 0;
+}
+
 /*
  * this starts processing the delayed reference count updates and
  * extent insertions we have queued up so far.  count can be
@@ -2474,6 +2487,44 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,

 	delayed_refs = &trans->transaction->delayed_refs;
 	INIT_LIST_HEAD(&cluster);
+	if (count == 0) {
+		count = delayed_refs->num_entries * 2;
+		run_most = 1;
+	}
+
+	if (!run_all && !run_most) {
+		int old;
+		int seq = atomic_read(&delayed_refs->ref_seq);
+
+progress:
+		old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
+		if (old) {
+			DEFINE_WAIT(__wait);
+			if (delayed_refs->num_entries < 16348)
+				return 0;
+
+			prepare_to_wait(&delayed_refs->wait, &__wait,
+					TASK_UNINTERRUPTIBLE);
+
+			old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
+			if (old) {
+				schedule();
+				finish_wait(&delayed_refs->wait, &__wait);
+
+				if (!refs_newer(delayed_refs, seq, 256))
+					goto progress;
+				else
+					return 0;
+			} else {
+				finish_wait(&delayed_refs->wait, &__wait);
+				goto again;
+			}
+		}
+
+	} else {
+		atomic_inc(&delayed_refs->procs_running_refs);
+	}
+
 again:
 	loops = 0;
 	spin_lock(&delayed_refs->lock);
@@ -2482,10 +2533,6 @@ again:
 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
 #endif

-	if (count == 0) {
-		count = delayed_refs->num_entries * 2;
-		run_most = 1;
-	}
 	while (1) {
 		if (!(run_all || run_most) &&
 		    delayed_refs->num_heads_ready < 64)

@@ -2508,9 +2555,12 @@ again:
 			btrfs_release_ref_cluster(&cluster);
 			spin_unlock(&delayed_refs->lock);
 			btrfs_abort_transaction(trans, root, ret);
+			atomic_dec(&delayed_refs->procs_running_refs);
 			return ret;
 		}

+		atomic_add(ret, &delayed_refs->ref_seq);
+
 		count -= min_t(unsigned long, ret, count);

 		if (count == 0)

@@ -2579,6 +2629,11 @@ again:
 		goto again;
 	}
 out:
+	atomic_dec(&delayed_refs->procs_running_refs);
+	smp_mb();
+	if (waitqueue_active(&delayed_refs->wait))
+		wake_up(&delayed_refs->wait);
+
 	spin_unlock(&delayed_refs->lock);
 	assert_qgroups_uptodate(trans);
 	return 0;
@@ -3284,6 +3339,7 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 	u64 num_devices = root->fs_info->fs_devices->rw_devices +
 		root->fs_info->fs_devices->missing_devices;
 	u64 target;
+	u64 tmp;

 	/*
 	 * see if restripe for this chunk_type is in progress, if so

@@ -3300,30 +3356,32 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 	}
 	spin_unlock(&root->fs_info->balance_lock);

+	/* First, mask out the RAID levels which aren't possible */
 	if (num_devices == 1)
-		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
+		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
+			   BTRFS_BLOCK_GROUP_RAID5);
+	if (num_devices < 3)
+		flags &= ~BTRFS_BLOCK_GROUP_RAID6;
 	if (num_devices < 4)
 		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

-	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
-	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
-		      BTRFS_BLOCK_GROUP_RAID10))) {
-		flags &= ~BTRFS_BLOCK_GROUP_DUP;
-	}
+	tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
+		       BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
+		       BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
+	flags &= ~tmp;

-	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
-	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
-		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
-	}
+	if (tmp & BTRFS_BLOCK_GROUP_RAID6)
+		tmp = BTRFS_BLOCK_GROUP_RAID6;
+	else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
+		tmp = BTRFS_BLOCK_GROUP_RAID5;
+	else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
+		tmp = BTRFS_BLOCK_GROUP_RAID10;
+	else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
+		tmp = BTRFS_BLOCK_GROUP_RAID1;
+	else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
+		tmp = BTRFS_BLOCK_GROUP_RAID0;

-	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
-	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
-	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
-	     (flags & BTRFS_BLOCK_GROUP_DUP))) {
-		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
-	}
-
-	return extended_to_chunk(flags);
+	return extended_to_chunk(flags | tmp);
 }

 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
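The rewritten reducer first masks out profiles the device count cannot support, then keeps only the highest-priority remaining profile (RAID6 over RAID5 over RAID10 over RAID1 over RAID0). A standalone userspace sketch of that selection, using flag values mirroring the ctree.h definitions in this diff (not the kernel function itself):

#include <assert.h>
#include <stdint.h>

#define BG_RAID0  (1ULL << 3)
#define BG_RAID1  (1ULL << 4)
#define BG_DUP    (1ULL << 5)
#define BG_RAID10 (1ULL << 6)
#define BG_RAID5  (1ULL << 7)
#define BG_RAID6  (1ULL << 8)

static uint64_t reduce_profile(uint64_t flags)
{
	/* pull out every redundancy profile bit, then pick one winner */
	uint64_t tmp = flags & (BG_DUP | BG_RAID0 | BG_RAID1 |
				BG_RAID5 | BG_RAID6 | BG_RAID10);
	flags &= ~tmp;

	if (tmp & BG_RAID6)
		tmp = BG_RAID6;
	else if (tmp & BG_RAID5)
		tmp = BG_RAID5;
	else if (tmp & BG_RAID10)
		tmp = BG_RAID10;
	else if (tmp & BG_RAID1)
		tmp = BG_RAID1;
	else if (tmp & BG_RAID0)
		tmp = BG_RAID0;

	return flags | tmp;
}

int main(void)
{
	assert(reduce_profile(BG_RAID1 | BG_RAID5) == BG_RAID5);
	assert(reduce_profile(BG_RAID0 | BG_RAID6 | BG_RAID10) == BG_RAID6);
	return 0;
}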
@@ -3347,6 +3405,7 @@ static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
 {
 	u64 flags;
+	u64 ret;

 	if (data)
 		flags = BTRFS_BLOCK_GROUP_DATA;

@@ -3355,7 +3414,8 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
 	else
 		flags = BTRFS_BLOCK_GROUP_METADATA;

-	return get_alloc_profile(root, flags);
+	ret = get_alloc_profile(root, flags);
+	return ret;
 }

 /*

@@ -3530,8 +3590,10 @@ static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
 {
 	u64 num_dev;

-	if (type & BTRFS_BLOCK_GROUP_RAID10 ||
-	    type & BTRFS_BLOCK_GROUP_RAID0)
+	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
+		    BTRFS_BLOCK_GROUP_RAID0 |
+		    BTRFS_BLOCK_GROUP_RAID5 |
+		    BTRFS_BLOCK_GROUP_RAID6))
 		num_dev = root->fs_info->fs_devices->rw_devices;
 	else if (type & BTRFS_BLOCK_GROUP_RAID1)
 		num_dev = 2;

@@ -3706,7 +3768,9 @@ static int can_overcommit(struct btrfs_root *root,

 	/*
 	 * If we have dup, raid1 or raid10 then only half of the free
-	 * space is actually useable.
+	 * space is actually useable.  For raid56, the space info used
+	 * doesn't include the parity drive, so we don't have to
+	 * change the math
 	 */
 	if (profile & (BTRFS_BLOCK_GROUP_DUP |
		       BTRFS_BLOCK_GROUP_RAID1 |
@@ -5539,10 +5603,14 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	return ret;
 }

-static u64 stripe_align(struct btrfs_root *root, u64 val)
+static u64 stripe_align(struct btrfs_root *root,
+			struct btrfs_block_group_cache *cache,
+			u64 val, u64 num_bytes)
 {
-	u64 mask = ((u64)root->stripesize - 1);
-	u64 ret = (val + mask) & ~mask;
+	u64 mask;
+	u64 ret;
+	mask = ((u64)root->stripesize - 1);
+	ret = (val + mask) & ~mask;
 	return ret;
 }

@@ -5599,8 +5667,12 @@ int __get_raid_index(u64 flags)
 		return BTRFS_RAID_DUP;
 	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
 		return BTRFS_RAID_RAID0;
-	else
-		return BTRFS_RAID_SINGLE;
+	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
+		return BTRFS_RAID_RAID5;
+	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
+		return BTRFS_RAID_RAID6;
+
+	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
 }

 static int get_block_group_index(struct btrfs_block_group_cache *cache)

@@ -5743,6 +5815,8 @@ search:
 		if (!block_group_bits(block_group, data)) {
 		    u64 extra = BTRFS_BLOCK_GROUP_DUP |
				BTRFS_BLOCK_GROUP_RAID1 |
+				BTRFS_BLOCK_GROUP_RAID5 |
+				BTRFS_BLOCK_GROUP_RAID6 |
				BTRFS_BLOCK_GROUP_RAID10;

			/*

@@ -5771,6 +5845,7 @@ have_block_group:
			 * lets look there
			 */
			if (last_ptr) {
+				unsigned long aligned_cluster;
				/*
				 * the refill lock keeps out other
				 * people trying to start a new cluster

@@ -5837,11 +5912,15 @@ refill_cluster:
					goto unclustered_alloc;
				}

+				aligned_cluster = max_t(unsigned long,
+						empty_cluster + empty_size,
+					      block_group->full_stripe_len);
+
				/* allocate a cluster in this block group */
				ret = btrfs_find_space_cluster(trans, root,
						       block_group, last_ptr,
						       search_start, num_bytes,
-						       empty_cluster + empty_size);
+						       aligned_cluster);
				if (ret == 0) {
					/*
					 * now pull our allocation out of this

@@ -5912,7 +5991,8 @@ unclustered_alloc:
			goto loop;
		}
 checks:
-		search_start = stripe_align(root, offset);
+		search_start = stripe_align(root, used_block_group,
+					    offset, num_bytes);

		/* move on to the next group */
		if (search_start + num_bytes >

@@ -7284,6 +7364,7 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
		root->fs_info->fs_devices->missing_devices;

	stripped = BTRFS_BLOCK_GROUP_RAID0 |
+		BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	if (num_devices == 1) {

@@ -7837,7 +7918,9 @@ int btrfs_read_block_groups(struct btrfs_root *root)
		btrfs_release_path(path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		cache->sectorsize = root->sectorsize;
-
+		cache->full_stripe_len = btrfs_full_stripe_len(root,
+					       &root->fs_info->mapping_tree,
+					       found_key.objectid);
		btrfs_init_free_space_ctl(cache);

		/*

@@ -7891,6 +7974,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
+		       BTRFS_BLOCK_GROUP_RAID5 |
+		       BTRFS_BLOCK_GROUP_RAID6 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*

@@ -7966,6 +8051,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;
+	cache->full_stripe_len = btrfs_full_stripe_len(root,
+					       &root->fs_info->mapping_tree,
+					       chunk_offset);

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
fs/btrfs/extent_io.c

@@ -1895,13 +1895,11 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
 	if (ret)
 		err = ret;

-	if (did_repair) {
-		ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
-					rec->start + rec->len - 1,
-					EXTENT_DAMAGED, GFP_NOFS);
-		if (ret && !err)
-			err = ret;
-	}
+	ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
+				rec->start + rec->len - 1,
+				EXTENT_DAMAGED, GFP_NOFS);
+	if (ret && !err)
+		err = ret;

 	kfree(rec);
 	return err;

@@ -1932,10 +1930,15 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
 	u64 map_length = 0;
 	u64 sector;
 	struct btrfs_bio *bbio = NULL;
+	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
 	int ret;

 	BUG_ON(!mirror_num);

+	/* we can't repair anything in raid56 yet */
+	if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
+		return 0;
+
 	bio = bio_alloc(GFP_NOFS, 1);
 	if (!bio)
 		return -EIO;

@@ -2052,6 +2055,7 @@ static int clean_io_failure(u64 start, struct page *page)
					    failrec->failed_mirror);
			did_repair = !ret;
		}
+		ret = 0;
	}

 out:

@@ -2487,13 +2491,13 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
 	return ret;
 }

-static int merge_bio(struct extent_io_tree *tree, struct page *page,
+static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
		     unsigned long offset, size_t size, struct bio *bio,
		     unsigned long bio_flags)
 {
	int ret = 0;
	if (tree->ops && tree->ops->merge_bio_hook)
-		ret = tree->ops->merge_bio_hook(page, offset, size, bio,
+		ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
						bio_flags);
	BUG_ON(ret < 0);
	return ret;

@@ -2528,7 +2532,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
			sector;

		if (prev_bio_flags != bio_flags || !contig ||
-		    merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
+		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
		    bio_add_page(bio, page, page_size, offset) < page_size) {
			ret = submit_one_bio(rw, bio, mirror_num,
					     prev_bio_flags);

@@ -4162,6 +4166,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)

 static void check_buffer_tree_ref(struct extent_buffer *eb)
 {
+	int refs;
	/* the ref bit is tricky.  We have to make sure it is set
	 * if we have the buffer dirty.   Otherwise the
	 * code to free a buffer can end up dropping a dirty

@@ -4182,6 +4187,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
	 * So bump the ref count first, then set the bit.  If someone
	 * beat us to it, drop the ref we added.
	 */
+	refs = atomic_read(&eb->refs);
+	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+		return;
+
	spin_lock(&eb->refs_lock);
	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_inc(&eb->refs);

@@ -4383,9 +4392,20 @@ static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)

 void free_extent_buffer(struct extent_buffer *eb)
 {
+	int refs;
+	int old;
	if (!eb)
		return;

+	while (1) {
+		refs = atomic_read(&eb->refs);
+		if (refs <= 3)
+			break;
+		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
+		if (old == refs)
+			return;
+	}
+
	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) == 2 &&
	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
fs/btrfs/extent_io.h

@@ -72,7 +72,7 @@ struct extent_io_ops {
 	int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
 	int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
 	extent_submit_bio_hook_t *submit_bio_hook;
-	int (*merge_bio_hook)(struct page *page, unsigned long offset,
+	int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset,
			      size_t size, struct bio *bio,
			      unsigned long bio_flags);
 	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
fs/btrfs/free-space-cache.c

@@ -1465,10 +1465,14 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 }

 static struct btrfs_free_space *
-find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+		unsigned long align)
 {
 	struct btrfs_free_space *entry;
 	struct rb_node *node;
+	u64 ctl_off;
+	u64 tmp;
+	u64 align_off;
 	int ret;

 	if (!ctl->free_space_offset.rb_node)

@@ -1483,15 +1487,34 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
 		if (entry->bytes < *bytes)
 			continue;

+		/* make sure the space returned is big enough
+		 * to match our requested alignment
+		 */
+		if (*bytes >= align) {
+			ctl_off = entry->offset - ctl->start;
+			tmp = ctl_off + align - 1;;
+			do_div(tmp, align);
+			tmp = tmp * align + ctl->start;
+			align_off = tmp - entry->offset;
+		} else {
+			align_off = 0;
+			tmp = entry->offset;
+		}
+
+		if (entry->bytes < *bytes + align_off)
+			continue;
+
 		if (entry->bitmap) {
-			ret = search_bitmap(ctl, entry, offset, bytes);
-			if (!ret)
+			ret = search_bitmap(ctl, entry, &tmp, bytes);
+			if (!ret) {
+				*offset = tmp;
 				return entry;
+			}
 			continue;
 		}

-		*offset = entry->offset;
-		*bytes = entry->bytes;
+		*offset = tmp;
+		*bytes = entry->bytes - align_off;
 		return entry;
 	}
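The new branch rounds a free-space entry up to the next multiple of align, measured from ctl->start, and rejects the entry if the gap eats the usable space. A standalone sketch of that arithmetic with sample numbers (do_div() is a kernel helper; plain division stands in for it here):

#include <assert.h>
#include <stdint.h>

static uint64_t align_up_from(uint64_t entry_offset, uint64_t ctl_start,
			      uint64_t align)
{
	uint64_t ctl_off = entry_offset - ctl_start;
	uint64_t tmp = (ctl_off + align - 1) / align;	/* round up */
	return tmp * align + ctl_start;
}

int main(void)
{
	/* a free-space entry at 1 MiB + 5 KiB, 64 KiB full-stripe alignment */
	uint64_t aligned = align_up_from((1 << 20) + 5 * 1024, 0, 64 * 1024);
	assert(aligned == (1 << 20) + 64 * 1024);
	/* align_off is the gap handed back to the free-space cache */
	assert(aligned - ((1 << 20) + 5 * 1024) == 59 * 1024);
	return 0;
}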
@@ -2101,9 +2124,12 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 	struct btrfs_free_space *entry = NULL;
 	u64 bytes_search = bytes + empty_size;
 	u64 ret = 0;
+	u64 align_gap = 0;
+	u64 align_gap_len = 0;

 	spin_lock(&ctl->tree_lock);
-	entry = find_free_space(ctl, &offset, &bytes_search);
+	entry = find_free_space(ctl, &offset, &bytes_search,
+				block_group->full_stripe_len);
 	if (!entry)
 		goto out;

@@ -2113,9 +2139,15 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 		if (!entry->bytes)
 			free_bitmap(ctl, entry);
 	} else {
+
 		unlink_free_space(ctl, entry);
-		entry->offset += bytes;
-		entry->bytes -= bytes;
+		align_gap_len = offset - entry->offset;
+		align_gap = entry->offset;
+
+		entry->offset = offset + bytes;
+		WARN_ON(entry->bytes < bytes + align_gap_len);
+
+		entry->bytes -= bytes + align_gap_len;
 		if (!entry->bytes)
 			kmem_cache_free(btrfs_free_space_cachep, entry);
 		else

@@ -2125,6 +2157,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 out:
 	spin_unlock(&ctl->tree_lock);

+	if (align_gap_len)
+		__btrfs_add_free_space(ctl, align_gap, align_gap_len);
 	return ret;
 }
fs/btrfs/inode.c

@@ -40,6 +40,7 @@
 #include <linux/ratelimit.h>
 #include <linux/mount.h>
 #include <linux/btrfs.h>
+#include <linux/blkdev.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"

@@ -1605,7 +1606,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
  * we don't create bios that span stripes or chunks
  */
-int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
+int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
 {

@@ -1620,7 +1621,7 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,

 	length = bio->bi_size;
 	map_length = length;
-	ret = btrfs_map_block(root->fs_info, READ, logical,
+	ret = btrfs_map_block(root->fs_info, rw, logical,
			      &map_length, NULL, 0);
 	/* Will always return 0 with map_multi == NULL */
 	BUG_ON(ret < 0);

@@ -6464,19 +6465,24 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 	int async_submit = 0;

 	map_length = orig_bio->bi_size;
-	ret = btrfs_map_block(root->fs_info, READ, start_sector << 9,
+	ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
			      &map_length, NULL, 0);
 	if (ret) {
 		bio_put(orig_bio);
 		return -EIO;
 	}
+
 	if (map_length >= orig_bio->bi_size) {
 		bio = orig_bio;
 		goto submit;
 	}

-	async_submit = 1;
+	/* async crcs make it difficult to collect full stripe writes. */
+	if (btrfs_get_alloc_profile(root, 1) &
+	    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
+		async_submit = 0;
+	else
+		async_submit = 1;

 	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
 	if (!bio)
 		return -ENOMEM;

@@ -6518,7 +6524,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
			bio->bi_end_io = btrfs_end_dio_bio;

			map_length = orig_bio->bi_size;
-			ret = btrfs_map_block(root->fs_info, READ,
+			ret = btrfs_map_block(root->fs_info, rw,
					      start_sector << 9,
					      &map_length, NULL, 0);
			if (ret) {
fs/btrfs/raid56.c (new file, 2080 lines; diff suppressed because it is too large)

fs/btrfs/raid56.h (new file, 51 lines)
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012 Fusion-io  All rights reserved.
+ * Copyright (C) 2012 Intel Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_RAID56__
+#define __BTRFS_RAID56__
+static inline int nr_parity_stripes(struct map_lookup *map)
+{
+	if (map->type & BTRFS_BLOCK_GROUP_RAID5)
+		return 1;
+	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+		return 2;
+	else
+		return 0;
+}
+
+static inline int nr_data_stripes(struct map_lookup *map)
+{
+	return map->num_stripes - nr_parity_stripes(map);
+}
+#define RAID5_P_STRIPE ((u64)-2)
+#define RAID6_Q_STRIPE ((u64)-1)
+
+#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) || \
+			     ((x) == RAID6_Q_STRIPE))
+
+int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
+			  struct btrfs_bio *bbio, u64 *raid_map,
+			  u64 stripe_len, int mirror_num);
+int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
+			struct btrfs_bio *bbio, u64 *raid_map,
+			u64 stripe_len);
+
+int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
+void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
+#endif
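nr_parity_stripes()/nr_data_stripes() drive most of the geometry in this series: a full stripe is stripe_len * nr_data_stripes(). A quick standalone illustration for a 4-disk array with 64 KiB stripes (the struct below is a stand-in for the real map_lookup):

#include <assert.h>

struct map_lookup_sketch {
	int num_stripes;
	int raid6;		/* 1 for RAID6, 0 for RAID5 */
};

static int nr_parity(const struct map_lookup_sketch *m)
{
	return m->raid6 ? 2 : 1;	/* one parity stripe, or P plus Q */
}

static int nr_data(const struct map_lookup_sketch *m)
{
	return m->num_stripes - nr_parity(m);
}

int main(void)
{
	struct map_lookup_sketch raid5 = { .num_stripes = 4, .raid6 = 0 };
	struct map_lookup_sketch raid6 = { .num_stripes = 4, .raid6 = 1 };

	assert(nr_data(&raid5) * 64 == 192);	/* 192 KiB full stripe */
	assert(nr_data(&raid6) * 64 == 128);	/* 128 KiB full stripe */
	return 0;
}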
fs/btrfs/scrub.c

@@ -28,6 +28,7 @@
 #include "dev-replace.h"
 #include "check-integrity.h"
 #include "rcu-string.h"
+#include "raid56.h"

 /*
  * This is only the first step towards a full-features scrub. It reads all

@@ -2254,6 +2255,13 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	struct btrfs_device *extent_dev;
 	int extent_mirror_num;

+	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+			 BTRFS_BLOCK_GROUP_RAID6)) {
+		if (num >= nr_data_stripes(map)) {
+			return 0;
+		}
+	}
+
 	nstripes = length;
 	offset = 0;
 	do_div(nstripes, map->stripe_len);
fs/btrfs/transaction.c

@@ -167,6 +167,9 @@ loop:

 	spin_lock_init(&cur_trans->commit_lock);
 	spin_lock_init(&cur_trans->delayed_refs.lock);
+	atomic_set(&cur_trans->delayed_refs.procs_running_refs, 0);
+	atomic_set(&cur_trans->delayed_refs.ref_seq, 0);
+	init_waitqueue_head(&cur_trans->delayed_refs.wait);

 	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
 	INIT_LIST_HEAD(&cur_trans->ordered_operations);

@@ -637,7 +640,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 	if (!list_empty(&trans->new_bgs))
 		btrfs_create_pending_block_groups(trans, root);

-	while (count < 2) {
+	while (count < 1) {
 		unsigned long cur = trans->delayed_ref_updates;
 		trans->delayed_ref_updates = 0;
 		if (cur &&

@@ -649,6 +652,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 		}
 		count++;
 	}
+
 	btrfs_trans_release_metadata(trans, root);
 	trans->block_rsv = NULL;

@@ -744,7 +748,9 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
 	struct extent_state *cached_state = NULL;
 	u64 start = 0;
 	u64 end;
+	struct blk_plug plug;

+	blk_start_plug(&plug);
 	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,

@@ -758,6 +764,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
 	}
 	if (err)
 		werr = err;
+	blk_finish_plug(&plug);
 	return werr;
 }
fs/btrfs/volumes.c

@@ -25,6 +25,8 @@
 #include <linux/capability.h>
 #include <linux/ratelimit.h>
 #include <linux/kthread.h>
+#include <linux/raid/pq.h>
+#include <asm/div64.h>
 #include "compat.h"
 #include "ctree.h"
 #include "extent_map.h"

@@ -32,6 +34,7 @@
 #include "transaction.h"
 #include "print-tree.h"
 #include "volumes.h"
+#include "raid56.h"
 #include "async-thread.h"
 #include "check-integrity.h"
 #include "rcu-string.h"

@@ -1465,6 +1468,21 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 		goto out;
 	}

+	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
+	    root->fs_info->fs_devices->rw_devices <= 2) {
+		printk(KERN_ERR "btrfs: unable to go below two "
+		       "devices on raid5\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
+	    root->fs_info->fs_devices->rw_devices <= 3) {
+		printk(KERN_ERR "btrfs: unable to go below three "
+		       "devices on raid6\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (strcmp(device_path, "missing") == 0) {
 		struct list_head *devices;
 		struct btrfs_device *tmp;
@@ -2726,11 +2744,15 @@ static int chunk_drange_filter(struct extent_buffer *leaf,
 		return 0;

-	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
-	    BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
-		factor = 2;
-	else
-		factor = 1;
-	factor = num_stripes / factor;
+	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
+	    BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
+		factor = num_stripes / 2;
+	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
+		factor = num_stripes - 1;
+	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
+		factor = num_stripes - 2;
+	} else {
+		factor = num_stripes;
+	}

 	for (i = 0; i < num_stripes; i++) {
 		stripe = btrfs_stripe_nr(chunk, i);
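The factor above is the number of stripes carrying distinct data: half for mirrored profiles, num_stripes minus one parity stripe for RAID5, minus two for RAID6. A compact restatement of the same table (not the kernel function):

#include <assert.h>

enum profile { SINGLE, MIRRORED, RAID5, RAID6 };

static int data_stripe_factor(enum profile p, int num_stripes)
{
	switch (p) {
	case MIRRORED:	return num_stripes / 2;		/* DUP/RAID1/RAID10 */
	case RAID5:	return num_stripes - 1;		/* one parity stripe */
	case RAID6:	return num_stripes - 2;		/* P and Q stripes */
	default:	return num_stripes;
	}
}

int main(void)
{
	assert(data_stripe_factor(MIRRORED, 4) == 2);
	assert(data_stripe_factor(RAID5, 4) == 3);
	assert(data_stripe_factor(RAID6, 6) == 4);
	return 0;
}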
@@ -3090,7 +3112,9 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
 	else
 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
-			    BTRFS_BLOCK_GROUP_RAID10);
+			    BTRFS_BLOCK_GROUP_RAID10 |
+			    BTRFS_BLOCK_GROUP_RAID5 |
+			    BTRFS_BLOCK_GROUP_RAID6);

 	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
 	    (!alloc_profile_is_valid(bctl->data.target, 1) ||

@@ -3130,7 +3154,9 @@ int btrfs_balance(struct btrfs_balance_control *bctl,

 	/* allow to reduce meta or sys integrity only if force set */
 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
-			BTRFS_BLOCK_GROUP_RAID10;
+			BTRFS_BLOCK_GROUP_RAID10 |
+			BTRFS_BLOCK_GROUP_RAID5 |
+			BTRFS_BLOCK_GROUP_RAID6;
 	do {
 		seq = read_seqbegin(&fs_info->profiles_lock);

@@ -3204,11 +3230,6 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		update_ioctl_balance_args(fs_info, 0, bargs);
 	}

-	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
-	    balance_need_close(fs_info)) {
-		__cancel_balance(fs_info);
-	}
-
 	wake_up(&fs_info->balance_wait_q);

 	return ret;
@@ -3611,8 +3632,46 @@ struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
 		.devs_increment	= 1,
 		.ncopies	= 1,
 	},
+	[BTRFS_RAID_RAID5] = {
+		.sub_stripes	= 1,
+		.dev_stripes	= 1,
+		.devs_max	= 0,
+		.devs_min	= 2,
+		.devs_increment	= 1,
+		.ncopies	= 2,
+	},
+	[BTRFS_RAID_RAID6] = {
+		.sub_stripes	= 1,
+		.dev_stripes	= 1,
+		.devs_max	= 0,
+		.devs_min	= 3,
+		.devs_increment	= 1,
+		.ncopies	= 3,
+	},
 };

+static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
+{
+	/* TODO allow them to set a preferred stripe size */
+	return 64 * 1024;
+}
+
+static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
+{
+	u64 features;
+
+	if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
+		return;
+
+	features = btrfs_super_incompat_flags(info->super_copy);
+	if (features & BTRFS_FEATURE_INCOMPAT_RAID56)
+		return;
+
+	features |= BTRFS_FEATURE_INCOMPAT_RAID56;
+	btrfs_set_super_incompat_flags(info->super_copy, features);
+	printk(KERN_INFO "btrfs: setting RAID5/6 feature flag\n");
+}
+
 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct map_lookup **map_ret,
@@ -3628,6 +3687,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	struct btrfs_device_info *devices_info = NULL;
 	u64 total_avail;
 	int num_stripes;	/* total number of stripes to allocate */
+	int data_stripes;	/* number of stripes that count for
+				   block group size */
 	int sub_stripes;	/* sub_stripes info for map */
 	int dev_stripes;	/* stripes per dev */
 	int devs_max;		/* max devs to use */

@@ -3639,6 +3700,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	u64 max_chunk_size;
 	u64 stripe_size;
 	u64 num_bytes;
+	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
 	int ndevs;
 	int i;
 	int j;

@@ -3768,16 +3830,31 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	stripe_size = devices_info[ndevs-1].max_avail;
 	num_stripes = ndevs * dev_stripes;

+	/*
+	 * this will have to be fixed for RAID1 and RAID10 over
+	 * more drives
+	 */
+	data_stripes = num_stripes / ncopies;
+
 	if (stripe_size * ndevs > max_chunk_size * ncopies) {
 		stripe_size = max_chunk_size * ncopies;
 		do_div(stripe_size, ndevs);
 	}

+	if (type & BTRFS_BLOCK_GROUP_RAID5) {
+		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
+				 btrfs_super_stripesize(info->super_copy));
+		data_stripes = num_stripes - 1;
+	}
+	if (type & BTRFS_BLOCK_GROUP_RAID6) {
+		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
+				 btrfs_super_stripesize(info->super_copy));
+		data_stripes = num_stripes - 2;
+	}
 	do_div(stripe_size, dev_stripes);

 	/* align to BTRFS_STRIPE_LEN */
-	do_div(stripe_size, BTRFS_STRIPE_LEN);
-	stripe_size *= BTRFS_STRIPE_LEN;
+	do_div(stripe_size, raid_stripe_len);
+	stripe_size *= raid_stripe_len;

 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
 	if (!map) {

@@ -3795,14 +3872,14 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 		}
 	}
 	map->sector_size = extent_root->sectorsize;
-	map->stripe_len = BTRFS_STRIPE_LEN;
-	map->io_align = BTRFS_STRIPE_LEN;
-	map->io_width = BTRFS_STRIPE_LEN;
+	map->stripe_len = raid_stripe_len;
+	map->io_align = raid_stripe_len;
+	map->io_width = raid_stripe_len;
 	map->type = type;
 	map->sub_stripes = sub_stripes;

 	*map_ret = map;
-	num_bytes = stripe_size * (num_stripes / ncopies);
+	num_bytes = stripe_size * data_stripes;

 	*stripe_size_out = stripe_size;
 	*num_bytes_out = num_bytes;

@@ -3853,6 +3930,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	}

 	free_extent_map(em);
+	check_raid56_incompat_flag(extent_root->fs_info, type);
+
 	kfree(devices_info);
 	return 0;
@@ -4136,6 +4215,10 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
 		ret = map->num_stripes;
 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
 		ret = map->sub_stripes;
+	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
+		ret = 2;
+	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+		ret = 3;
 	else
 		ret = 1;
 	free_extent_map(em);

@@ -4148,6 +4231,52 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
 	return ret;
 }

+unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+				    struct btrfs_mapping_tree *map_tree,
+				    u64 logical)
+{
+	struct extent_map *em;
+	struct map_lookup *map;
+	struct extent_map_tree *em_tree = &map_tree->map_tree;
+	unsigned long len = root->sectorsize;
+
+	read_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, logical, len);
+	read_unlock(&em_tree->lock);
+	BUG_ON(!em);
+
+	BUG_ON(em->start > logical || em->start + em->len < logical);
+	map = (struct map_lookup *)em->bdev;
+	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+			 BTRFS_BLOCK_GROUP_RAID6)) {
+		len = map->stripe_len * nr_data_stripes(map);
+	}
+	free_extent_map(em);
+	return len;
+}
+
+int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
+			   u64 logical, u64 len, int mirror_num)
+{
+	struct extent_map *em;
+	struct map_lookup *map;
+	struct extent_map_tree *em_tree = &map_tree->map_tree;
+	int ret = 0;
+
+	read_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, logical, len);
+	read_unlock(&em_tree->lock);
+	BUG_ON(!em);
+
+	BUG_ON(em->start > logical || em->start + em->len < logical);
+	map = (struct map_lookup *)em->bdev;
+	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+			 BTRFS_BLOCK_GROUP_RAID6))
+		ret = 1;
+	free_extent_map(em);
+	return ret;
+}
+
 static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first, int num,
			    int optimal, int dev_replace_is_ongoing)
@@ -4185,10 +4314,39 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
 	return optimal;
 }

+static inline int parity_smaller(u64 a, u64 b)
+{
+	return a > b;
+}
+
+/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
+static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
+{
+	struct btrfs_bio_stripe s;
+	int i;
+	u64 l;
+	int again = 1;
+
+	while (again) {
+		again = 0;
+		for (i = 0; i < bbio->num_stripes - 1; i++) {
+			if (parity_smaller(raid_map[i], raid_map[i+1])) {
+				s = bbio->stripes[i];
+				l = raid_map[i];
+				bbio->stripes[i] = bbio->stripes[i+1];
+				raid_map[i] = raid_map[i+1];
+				bbio->stripes[i+1] = s;
+				raid_map[i+1] = l;
+				again = 1;
+			}
+		}
+	}
+}
+
 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
-			     int mirror_num)
+			     int mirror_num, u64 **raid_map_ret)
 {
 	struct extent_map *em;
 	struct map_lookup *map;
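RAID5_P_STRIPE ((u64)-2) and RAID6_Q_STRIPE ((u64)-1) are the two largest u64 values, so the bubble pass above always moves parity and syndrome stripes behind the data stripes. A standalone check of that ordering (sorting only the map, without the paired stripe array):

#include <assert.h>
#include <stdint.h>

#define P_STRIPE ((uint64_t)-2)
#define Q_STRIPE ((uint64_t)-1)

int main(void)
{
	uint64_t raid_map[4] = { P_STRIPE, 4096, Q_STRIPE, 0 };
	int again = 1;

	while (again) {	/* same bubble pass as sort_parity_stripes() */
		again = 0;
		for (int i = 0; i < 3; i++) {
			if (raid_map[i] > raid_map[i + 1]) {
				uint64_t l = raid_map[i];
				raid_map[i] = raid_map[i + 1];
				raid_map[i + 1] = l;
				again = 1;
			}
		}
	}
	assert(raid_map[0] == 0 && raid_map[1] == 4096);
	assert(raid_map[2] == P_STRIPE && raid_map[3] == Q_STRIPE);
	return 0;
}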
@@ -4200,6 +4358,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	u64 stripe_nr;
 	u64 stripe_nr_orig;
 	u64 stripe_nr_end;
+	u64 stripe_len;
+	u64 *raid_map = NULL;
 	int stripe_index;
 	int i;
 	int ret = 0;

@@ -4211,6 +4371,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	int num_alloc_stripes;
 	int patch_the_first_stripe_for_dev_replace = 0;
 	u64 physical_to_patch_in_first_stripe = 0;
+	u64 raid56_full_stripe_start = (u64)-1;

 	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, logical, *length);
@@ -4227,29 +4388,63 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	map = (struct map_lookup *)em->bdev;
 	offset = logical - em->start;

 	if (mirror_num > map->num_stripes)
 		mirror_num = 0;

+	stripe_len = map->stripe_len;
 	stripe_nr = offset;
 	/*
 	 * stripe_nr counts the total number of stripes we have to stride
 	 * to get to this block
 	 */
-	do_div(stripe_nr, map->stripe_len);
+	do_div(stripe_nr, stripe_len);

-	stripe_offset = stripe_nr * map->stripe_len;
+	stripe_offset = stripe_nr * stripe_len;
 	BUG_ON(offset < stripe_offset);

 	/* stripe_offset is the offset of this block in its stripe*/
 	stripe_offset = offset - stripe_offset;

-	if (rw & REQ_DISCARD)
+	/* if we're here for raid56, we need to know the stripe aligned start */
+	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
+		raid56_full_stripe_start = offset;
+
+		/* allow a write of a full stripe, but make sure we don't
+		 * allow straddling of stripes
+		 */
+		do_div(raid56_full_stripe_start, full_stripe_len);
+		raid56_full_stripe_start *= full_stripe_len;
+	}
+
+	if (rw & REQ_DISCARD) {
+		/* we don't discard raid56 yet */
+		if (map->type &
+		    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+			ret = -EOPNOTSUPP;
+			goto out;
+		}
 		*length = min_t(u64, em->len - offset, *length);
-	else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
-		/* we limit the length of each bio to what fits in a stripe */
-		*length = min_t(u64, em->len - offset,
-				map->stripe_len - stripe_offset);
+	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+		u64 max_len;
+		/* For writes to RAID[56], allow a full stripeset across all disks.
+		   For other RAID types and for RAID[56] reads, just allow a single
+		   stripe (on a single disk). */
+		if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
+		    (rw & REQ_WRITE)) {
+			max_len = stripe_len * nr_data_stripes(map) -
+				(offset - raid56_full_stripe_start);
+		} else {
+			/* we limit the length of each bio to what fits in a stripe */
+			max_len = stripe_len - stripe_offset;
+		}
+		*length = min_t(u64, em->len - offset, max_len);
 	} else {
 		*length = em->len - offset;
 	}

+	/* This is for when we're called from btrfs_merge_bio_hook() and all
+	   it cares about is the length */
 	if (!bbio_ret)
 		goto out;
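raid56_full_stripe_start rounds a chunk-relative offset down to the start of its full stripe, and a write may then extend to the end of that full stripe. A standalone sketch with 64 KiB stripes on three data disks (192 KiB full stripes):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t stripe_len = 64 * 1024;
	uint64_t full_stripe_len = stripe_len * 3;	/* nr_data_stripes = 3 */
	uint64_t offset = 500 * 1024;			/* I/O offset in chunk */
	uint64_t start = (offset / full_stripe_len) * full_stripe_len;

	assert(start == 384 * 1024);	/* second full stripe begins here */
	/* a write may span up to the end of this full stripe */
	assert(full_stripe_len - (offset - start) == 76 * 1024);
	return 0;
}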
@@ -4282,7 +4477,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		u64 physical_of_found = 0;

 		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
-			     logical, &tmp_length, &tmp_bbio, 0);
+			     logical, &tmp_length, &tmp_bbio, 0, NULL);
 		if (ret) {
 			WARN_ON(tmp_bbio != NULL);
 			goto out;

@@ -4348,6 +4543,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		do_div(stripe_nr_end, map->stripe_len);
 		stripe_end_offset = stripe_nr_end * map->stripe_len -
				    (offset + *length);
+
 		if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
 			if (rw & REQ_DISCARD)
				num_stripes = min_t(u64, map->num_stripes,
@@ -4398,6 +4594,65 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
					    dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}
+
+	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+				BTRFS_BLOCK_GROUP_RAID6)) {
+		u64 tmp;
+
+		if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
+		    && raid_map_ret) {
+			int i, rot;
+
+			/* push stripe_nr back to the start of the full stripe */
+			stripe_nr = raid56_full_stripe_start;
+			do_div(stripe_nr, stripe_len);
+
+			stripe_index = do_div(stripe_nr, nr_data_stripes(map));
+
+			/* RAID[56] write or recovery. Return all stripes */
+			num_stripes = map->num_stripes;
+			max_errors = nr_parity_stripes(map);
+
+			raid_map = kmalloc(sizeof(u64) * num_stripes,
+					   GFP_NOFS);
+			if (!raid_map) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			/* Work out the disk rotation on this stripe-set */
+			tmp = stripe_nr;
+			rot = do_div(tmp, num_stripes);
+
+			/* Fill in the logical address of each stripe */
+			tmp = stripe_nr * nr_data_stripes(map);
+			for (i = 0; i < nr_data_stripes(map); i++)
+				raid_map[(i+rot) % num_stripes] =
+					em->start + (tmp + i) * map->stripe_len;
+
+			raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
+			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+				raid_map[(i+rot+1) % num_stripes] =
+					RAID6_Q_STRIPE;
+
+			*length = map->stripe_len;
+			stripe_index = 0;
+			stripe_offset = 0;
+		} else {
+			/*
+			 * Mirror #0 or #1 means the original data block.
+			 * Mirror #2 is RAID5 parity block.
+			 * Mirror #3 is RAID6 Q block.
+			 */
+			stripe_index = do_div(stripe_nr, nr_data_stripes(map));
+			if (mirror_num > 1)
+				stripe_index = nr_data_stripes(map) +
+						mirror_num - 2;
+
+			/* We distribute the parity blocks across stripes */
+			tmp = stripe_nr + stripe_index;
+			stripe_index = do_div(tmp, map->num_stripes);
+		}
 	} else {
 		/*
 		 * after this do_div call, stripe_nr is the number of stripes
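The raid_map fill above rotates parity by one disk per full stripe. A standalone sketch for a 3-disk RAID5 chunk at logical 0 with 64 KiB stripes, mirroring the modular arithmetic (a simplification of the kernel loop, not the loop itself):

#include <assert.h>
#include <stdint.h>

#define P_STRIPE ((uint64_t)-2)

int main(void)
{
	int num_stripes = 3, nr_data = 2, i;
	uint64_t stripe_len = 64 * 1024, raid_map[3];
	uint64_t stripe_nr = 1;			/* second full stripe */
	int rot = stripe_nr % num_stripes;	/* disk rotation = 1 */
	uint64_t tmp = stripe_nr * nr_data;

	for (i = 0; i < nr_data; i++)
		raid_map[(i + rot) % num_stripes] = (tmp + i) * stripe_len;
	raid_map[(i + rot) % num_stripes] = P_STRIPE;

	assert(raid_map[1] == 2 * stripe_len);	/* first data block */
	assert(raid_map[2] == 3 * stripe_len);	/* second data block */
	assert(raid_map[0] == P_STRIPE);	/* parity rotated to disk 0 */
	return 0;
}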
@ -4506,8 +4761,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10 |
BTRFS_BLOCK_GROUP_RAID5 |
BTRFS_BLOCK_GROUP_DUP)) {
max_errors = 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
max_errors = 2;
}
}

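The tolerance table added above reads more plainly as a lookup. A hedged sketch (hypothetical helper; the profile bits are taken from this series' ctree.h and are mutually exclusive here, so check order does not matter):

#include <stdint.h>

#define BTRFS_BLOCK_GROUP_RAID1  (1ULL << 4)
#define BTRFS_BLOCK_GROUP_DUP    (1ULL << 5)
#define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6)
#define BTRFS_BLOCK_GROUP_RAID5  (1ULL << 7)
#define BTRFS_BLOCK_GROUP_RAID6  (1ULL << 8)

/* Sketch only: how many failed stripes each profile can absorb. */
static int profile_max_errors(uint64_t type)
{
	if (type & BTRFS_BLOCK_GROUP_RAID6)
		return 2;	/* P and Q: survives two device failures */
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_DUP))
		return 1;	/* one mirror copy or one parity stripe */
	return 0;		/* SINGLE/RAID0: no redundancy */
}
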
@ -4608,6 +4866,10 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
bbio->mirror_num = map->num_stripes + 1;
}
if (raid_map) {
sort_parity_stripes(bbio, raid_map);
*raid_map_ret = raid_map;
}
out:
if (dev_replace_is_ongoing)
btrfs_dev_replace_unlock(dev_replace);
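One consequence of the sentinel tagging worth spelling out: because RAID5_P_STRIPE and RAID6_Q_STRIPE are the two largest u64 values, sorting stripes by their raid_map entry leaves data stripes first, in logical order, with parity at the end. A conceptual sketch of what sort_parity_stripes() is presumed to arrange (the in-kernel implementation may differ):

#include <stdint.h>

/* Sketch only: co-sort raid_map and the matching physical addresses so
 * the parity sentinels ((uint64_t)-2 and (uint64_t)-1) sink to the end. */
static void sort_by_raid_map(uint64_t *raid_map, uint64_t *physical, int n)
{
	int i, j;

	for (i = 1; i < n; i++)		/* simple insertion sort */
		for (j = i; j > 0 && raid_map[j - 1] > raid_map[j]; j--) {
			uint64_t t;

			t = raid_map[j - 1]; raid_map[j - 1] = raid_map[j]; raid_map[j] = t;
			t = physical[j - 1]; physical[j - 1] = physical[j]; physical[j] = t;
		}
}
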
@ -4620,7 +4882,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
struct btrfs_bio **bbio_ret, int mirror_num)
{
return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
mirror_num);
mirror_num, NULL);
}

int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@ -4634,6 +4896,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
u64 bytenr;
u64 length;
u64 stripe_nr;
u64 rmap_len;
int i, j, nr = 0;

read_lock(&em_tree->lock);
@ -4644,10 +4907,17 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
map = (struct map_lookup *)em->bdev;

length = em->len;
rmap_len = map->stripe_len;

if (map->type & BTRFS_BLOCK_GROUP_RAID10)
do_div(length, map->num_stripes / map->sub_stripes);
else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
do_div(length, map->num_stripes);
else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
BTRFS_BLOCK_GROUP_RAID6)) {
do_div(length, nr_data_stripes(map));
rmap_len = map->stripe_len * nr_data_stripes(map);
}

buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
BUG_ON(!buf); /* -ENOMEM */
@ -4667,8 +4937,11 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
do_div(stripe_nr, map->sub_stripes);
} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
stripe_nr = stripe_nr * map->num_stripes + i;
}
bytenr = chunk_start + stripe_nr * map->stripe_len;
} /* else if RAID[56], multiply by nr_data_stripes().
* Alternatively, just use rmap_len below instead of
* map->stripe_len */

bytenr = chunk_start + stripe_nr * rmap_len;
WARN_ON(nr >= map->num_stripes);
for (j = 0; j < nr; j++) {
if (buf[j] == bytenr)
@ -4682,7 +4955,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,

*logical = buf;
*naddrs = nr;
*stripe_len = map->stripe_len;
*stripe_len = rmap_len;

free_extent_map(em);
return 0;
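The rmap_len change above means one physical stripe on a RAID5/6 device answers for a full stripe's worth of logical bytes. Worked example with assumed numbers (64 KiB device stripes on a hypothetical 6-device RAID6 chunk, so nr_data_stripes is 4):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t stripe_len = 64 * 1024;	/* 64 KiB per device stripe */
	int num_stripes = 6;			/* hypothetical 6-device RAID6 */
	int nr_data = num_stripes - 2;		/* P and Q are parity */
	uint64_t rmap_len = stripe_len * nr_data;

	assert(rmap_len == 256 * 1024);		/* logical bytes per stripe row */
	return 0;
}
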
@ -4756,7 +5029,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
bio->bi_bdev = (struct block_device *)
(unsigned long)bbio->mirror_num;
/* only send an error to the higher layers if it is
* beyond the tolerance of the multi-bio
* beyond the tolerance of the btrfs bio
*/
if (atomic_read(&bbio->error) > bbio->max_errors) {
err = -EIO;
@ -4790,13 +5063,18 @@ struct async_sched {
* This will add one bio to the pending list for a device and make sure
* the work struct is scheduled.
*/
static noinline void schedule_bio(struct btrfs_root *root,
noinline void btrfs_schedule_bio(struct btrfs_root *root,
struct btrfs_device *device,
int rw, struct bio *bio)
{
int should_queue = 1;
struct btrfs_pending_bios *pending_bios;

if (device->missing || !device->bdev) {
bio_endio(bio, -EIO);
return;
}

/* don't bother with additional async steps for reads, right now */
if (!(rw & REQ_WRITE)) {
bio_get(bio);
@ -4894,7 +5172,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
#endif
bio->bi_bdev = dev->bdev;
if (async)
schedule_bio(root, dev, rw, bio);
btrfs_schedule_bio(root, dev, rw, bio);
else
btrfsic_submit_bio(rw, bio);
}
@ -4953,6 +5231,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
u64 logical = (u64)bio->bi_sector << 9;
u64 length = 0;
u64 map_length;
u64 *raid_map = NULL;
int ret;
int dev_nr = 0;
int total_devs = 1;
@ -4961,12 +5240,30 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
length = bio->bi_size;
map_length = length;

ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
mirror_num);
if (ret)
ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
mirror_num, &raid_map);
if (ret) /* -ENOMEM */
return ret;

total_devs = bbio->num_stripes;
bbio->orig_bio = first_bio;
bbio->private = first_bio->bi_private;
bbio->end_io = first_bio->bi_end_io;
atomic_set(&bbio->stripes_pending, bbio->num_stripes);

if (raid_map) {
/* In this case, map_length has been set to the length of
a single stripe; not the whole write */
if (rw & WRITE) {
return raid56_parity_write(root, bio, bbio,
raid_map, map_length);
} else {
return raid56_parity_recover(root, bio, bbio,
raid_map, map_length,
mirror_num);
}
}

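The dispatch above is where RAID5/6 I/O leaves the common path: when __btrfs_map_block hands back a raid_map, the bio belongs to the parity code (raid56_parity_write for full-stripe writes, raid56_parity_recover for reads that need reconstruction) and btrfs_map_bio returns early. Condensed as a sketch with mock types:

#include <stdint.h>

enum target { STRIPE_SUBMIT, RAID56_WRITE, RAID56_RECOVER };

/* Sketch only: a non-NULL raid_map marks a RAID5/6 full-stripe
 * operation; everything else takes the ordinary per-stripe path. */
static enum target classify_bio(int is_write, const uint64_t *raid_map)
{
	if (raid_map)
		return is_write ? RAID56_WRITE : RAID56_RECOVER;
	return STRIPE_SUBMIT;
}
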
if (map_length < length) {
printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
"len %llu\n", (unsigned long long)logical,
@ -4975,11 +5272,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
BUG();
}

bbio->orig_bio = first_bio;
bbio->private = first_bio->bi_private;
bbio->end_io = first_bio->bi_end_io;
atomic_set(&bbio->stripes_pending, bbio->num_stripes);

while (dev_nr < total_devs) {
dev = bbio->stripes[dev_nr].dev;
if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {

@ -321,7 +321,14 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
struct btrfs_device *tgtdev);
int btrfs_scratch_superblock(struct btrfs_device *device);

void btrfs_schedule_bio(struct btrfs_root *root,
struct btrfs_device *device,
int rw, struct bio *bio);
int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
u64 logical, u64 len, int mirror_num);
unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
struct btrfs_mapping_tree *map_tree,
u64 logical);
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
int index)
{