mirror of https://github.com/FEX-Emu/linux.git
Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "A set of fixes for the current series. This contains:

   - A bunch of fixes for lightnvm, should be the last round for this
     series. From Matias and Wenwei.

   - A writeback detach inode fix from Ilya, also marked for stable.

   - A block (though it says SCSI) fix for an OOPS in SCSI runtime
     power management.

   - Module init error path fixes for null_blk from Minfei"

* 'for-linus' of git://git.kernel.dk/linux-block:
  null_blk: Fix error path in module initialization
  lightnvm: do not compile in debugging by default
  lightnvm: prevent gennvm module unload on use
  lightnvm: fix media mgr registration
  lightnvm: replace req queue with nvmdev for lld
  lightnvm: comments on constants
  lightnvm: check mm before use
  lightnvm: refactor spin_unlock in gennvm_get_blk
  lightnvm: put blks when luns configure failed
  lightnvm: use flags in rrpc_get_blk
  block: detach bdev inode from its wb in __blkdev_put()
  SCSI: Fix NULL pointer dereference in runtime PM
commit 7807563183
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3405,6 +3405,9 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 {
     int ret = 0;
 
+    if (!q->dev)
+        return ret;
+
     spin_lock_irq(q->queue_lock);
     if (q->nr_pending) {
         ret = -EBUSY;
@@ -3432,6 +3435,9 @@ EXPORT_SYMBOL(blk_pre_runtime_suspend);
  */
 void blk_post_runtime_suspend(struct request_queue *q, int err)
 {
+    if (!q->dev)
+        return;
+
     spin_lock_irq(q->queue_lock);
     if (!err) {
         q->rpm_status = RPM_SUSPENDED;
@@ -3456,6 +3462,9 @@ EXPORT_SYMBOL(blk_post_runtime_suspend);
  */
 void blk_pre_runtime_resume(struct request_queue *q)
 {
+    if (!q->dev)
+        return;
+
     spin_lock_irq(q->queue_lock);
     q->rpm_status = RPM_RESUMING;
     spin_unlock_irq(q->queue_lock);
@@ -3478,6 +3487,9 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
  */
 void blk_post_runtime_resume(struct request_queue *q, int err)
 {
+    if (!q->dev)
+        return;
+
     spin_lock_irq(q->queue_lock);
     if (!err) {
         q->rpm_status = RPM_ACTIVE;
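Note: the blk-core.c change above is a plain guard. Each runtime-PM helper now returns early when the queue has no device attached, i.e. blk_pm_runtime_init() was never called for it. A minimal user-space sketch of the same guard follows, with stand-in types rather than the kernel's:

/* User-space model of the q->dev guard; types and values are stand-ins,
 * not the kernel's definitions. */
#include <errno.h>
#include <stdio.h>

enum rpm_status { RPM_ACTIVE, RPM_RESUMING, RPM_SUSPENDING, RPM_SUSPENDED };

struct request_queue {
    void *dev;                  /* NULL: blk_pm_runtime_init() never ran */
    enum rpm_status rpm_status;
    int nr_pending;
};

static int pre_runtime_suspend(struct request_queue *q)
{
    int ret = 0;

    if (!q->dev)                /* the added guard: nothing to manage */
        return ret;

    if (q->nr_pending)
        ret = -EBUSY;
    else
        q->rpm_status = RPM_SUSPENDING;
    return ret;
}

int main(void)
{
    struct request_queue q = { .dev = NULL, .rpm_status = RPM_ACTIVE };

    /* Before the fix, suspending a queue that never set up runtime PM
     * touched uninitialized PM state; now it is a harmless no-op. */
    printf("ret = %d\n", pre_runtime_suspend(&q));
    return 0;
}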
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -444,8 +444,9 @@ static void null_lnvm_end_io(struct request *rq, int error)
     blk_put_request(rq);
 }
 
-static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
+static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
+    struct request_queue *q = dev->q;
     struct request *rq;
     struct bio *bio = rqd->bio;
 
@@ -470,7 +471,7 @@ static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
     return 0;
 }
 
-static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
+static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
 {
     sector_t size = gb * 1024 * 1024 * 1024ULL;
     sector_t blksize;
@@ -523,7 +524,7 @@ static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
     return 0;
 }
 
-static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
+static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
 {
     mempool_t *virtmem_pool;
 
@@ -541,7 +542,7 @@ static void null_lnvm_destroy_dma_pool(void *pool)
     mempool_destroy(pool);
 }
 
-static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
+static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                 gfp_t mem_flags, dma_addr_t *dma_handler)
 {
     return mempool_alloc(pool, mem_flags);
@@ -765,7 +766,9 @@ out:
 
 static int __init null_init(void)
 {
+    int ret = 0;
     unsigned int i;
+    struct nullb *nullb;
 
     if (bs > PAGE_SIZE) {
         pr_warn("null_blk: invalid block size\n");
@@ -807,22 +810,29 @@ static int __init null_init(void)
                         0, 0, NULL);
         if (!ppa_cache) {
             pr_err("null_blk: unable to create ppa cache\n");
-            return -ENOMEM;
+            ret = -ENOMEM;
+            goto err_ppa;
         }
     }
 
     for (i = 0; i < nr_devices; i++) {
-        if (null_add_dev()) {
-            unregister_blkdev(null_major, "nullb");
-            goto err_ppa;
-        }
+        ret = null_add_dev();
+        if (ret)
+            goto err_dev;
     }
 
     pr_info("null: module loaded\n");
     return 0;
-err_ppa:
+
+err_dev:
+    while (!list_empty(&nullb_list)) {
+        nullb = list_entry(nullb_list.next, struct nullb, list);
+        null_del_dev(nullb);
+    }
     kmem_cache_destroy(ppa_cache);
-    return -EINVAL;
+err_ppa:
+    unregister_blkdev(null_major, "nullb");
+    return ret;
 }
 
 static void __exit null_exit(void)
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -18,6 +18,7 @@ if NVM
 
 config NVM_DEBUG
     bool "Open-Channel SSD debugging support"
+    default n
     ---help---
     Exposes a debug management interface to create/remove targets at:
 
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(nvm_unregister_target);
 void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
                             dma_addr_t *dma_handler)
 {
-    return dev->ops->dev_dma_alloc(dev->q, dev->ppalist_pool, mem_flags,
+    return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
                                 dma_handler);
 }
 EXPORT_SYMBOL(nvm_dev_dma_alloc);
@@ -97,15 +97,47 @@ static struct nvmm_type *nvm_find_mgr_type(const char *name)
     return NULL;
 }
 
+struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
+{
+    struct nvmm_type *mt;
+    int ret;
+
+    lockdep_assert_held(&nvm_lock);
+
+    list_for_each_entry(mt, &nvm_mgrs, list) {
+        ret = mt->register_mgr(dev);
+        if (ret < 0) {
+            pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
+                                ret, dev->name);
+            return NULL; /* initialization failed */
+        } else if (ret > 0)
+            return mt;
+    }
+
+    return NULL;
+}
+
 int nvm_register_mgr(struct nvmm_type *mt)
 {
+    struct nvm_dev *dev;
     int ret = 0;
 
     down_write(&nvm_lock);
-    if (nvm_find_mgr_type(mt->name))
+    if (nvm_find_mgr_type(mt->name)) {
         ret = -EEXIST;
-    else
+        goto finish;
+    } else {
         list_add(&mt->list, &nvm_mgrs);
+    }
+
+    /* try to register media mgr if any device have none configured */
+    list_for_each_entry(dev, &nvm_devices, devices) {
+        if (dev->mt)
+            continue;
+
+        dev->mt = nvm_init_mgr(dev);
+    }
+finish:
     up_write(&nvm_lock);
 
     return ret;
@@ -123,26 +155,6 @@ void nvm_unregister_mgr(struct nvmm_type *mt)
 }
 EXPORT_SYMBOL(nvm_unregister_mgr);
 
-/* register with device with a supported manager */
-static int register_mgr(struct nvm_dev *dev)
-{
-    struct nvmm_type *mt;
-    int ret = 0;
-
-    list_for_each_entry(mt, &nvm_mgrs, list) {
-        ret = mt->register_mgr(dev);
-        if (ret > 0) {
-            dev->mt = mt;
-            break;  /* successfully initialized */
-        }
-    }
-
-    if (!ret)
-        pr_info("nvm: no compatible nvm manager found.\n");
-
-    return ret;
-}
-
 static struct nvm_dev *nvm_find_nvm_dev(const char *name)
 {
     struct nvm_dev *dev;
@@ -246,7 +258,7 @@ static int nvm_init(struct nvm_dev *dev)
     if (!dev->q || !dev->ops)
         return ret;
 
-    if (dev->ops->identity(dev->q, &dev->identity)) {
+    if (dev->ops->identity(dev, &dev->identity)) {
         pr_err("nvm: device could not be identified\n");
         goto err;
     }
@@ -271,14 +283,6 @@ static int nvm_init(struct nvm_dev *dev)
         goto err;
     }
 
-    down_write(&nvm_lock);
-    ret = register_mgr(dev);
-    up_write(&nvm_lock);
-    if (ret < 0)
-        goto err;
-    if (!ret)
-        return 0;
-
     pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
             dev->name, dev->sec_per_pg, dev->nr_planes,
             dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
@@ -326,8 +330,7 @@ int nvm_register(struct request_queue *q, char *disk_name,
     }
 
     if (dev->ops->max_phys_sect > 1) {
-        dev->ppalist_pool = dev->ops->create_dma_pool(dev->q,
-                                "ppalist");
+        dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
         if (!dev->ppalist_pool) {
             pr_err("nvm: could not create ppa pool\n");
             ret = -ENOMEM;
@@ -335,7 +338,9 @@ int nvm_register(struct request_queue *q, char *disk_name,
         }
     }
 
+    /* register device with a supported media manager */
     down_write(&nvm_lock);
+    dev->mt = nvm_init_mgr(dev);
     list_add(&dev->devices, &nvm_devices);
     up_write(&nvm_lock);
 
@@ -380,19 +385,13 @@ static int nvm_create_target(struct nvm_dev *dev,
     struct nvm_tgt_type *tt;
     struct nvm_target *t;
     void *targetdata;
-    int ret = 0;
 
-    down_write(&nvm_lock);
     if (!dev->mt) {
-        ret = register_mgr(dev);
-        if (!ret)
-            ret = -ENODEV;
-        if (ret < 0) {
-            up_write(&nvm_lock);
-            return ret;
-        }
+        pr_info("nvm: device has no media manager registered.\n");
+        return -ENODEV;
     }
 
+    down_write(&nvm_lock);
     tt = nvm_find_target_type(create->tgttype);
     if (!tt) {
         pr_err("nvm: target type %s not found\n", create->tgttype);
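Note: nvm_init_mgr() relies on a tri-state return from ->register_mgr(): negative means the manager matched the device but failed to bring it up, zero means it does not handle the device, and positive means it claimed it. A small user-space sketch of that matching loop, with illustrative types only:

/* Sketch of the tri-state matching loop used by nvm_init_mgr() above.
 * Types and the probe functions are illustrative, not the lightnvm API. */
#include <stdio.h>

struct mgr {
    const char *name;
    /* <0: matched but failed; 0: does not handle this device; >0: claimed */
    int (*register_mgr)(void *dev);
};

static int probe_a(void *dev) { (void)dev; return 0; }  /* passes */
static int probe_b(void *dev) { (void)dev; return 1; }  /* claims the device */

static struct mgr mgrs[] = {
    { "mgr_a", probe_a },
    { "mgr_b", probe_b },
};

static struct mgr *init_mgr(void *dev)
{
    for (unsigned i = 0; i < sizeof(mgrs) / sizeof(mgrs[0]); i++) {
        int ret = mgrs[i].register_mgr(dev);

        if (ret < 0) {
            fprintf(stderr, "mgr %s failed (%d)\n", mgrs[i].name, ret);
            return NULL;        /* initialization failed */
        } else if (ret > 0)
            return &mgrs[i];
    }
    return NULL;                /* no compatible manager */
}

int main(void)
{
    struct mgr *mt = init_mgr((void *)0x1);

    printf("matched: %s\n", mt ? mt->name : "(none)");
    return 0;
}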
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -195,7 +195,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
     }
 
     if (dev->ops->get_l2p_tbl) {
-        ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
+        ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
                     gennvm_block_map, dev);
         if (ret) {
             pr_err("gennvm: could not read L2P table.\n");
@@ -219,6 +219,9 @@ static int gennvm_register(struct nvm_dev *dev)
     struct gen_nvm *gn;
     int ret;
 
+    if (!try_module_get(THIS_MODULE))
+        return -ENODEV;
+
     gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
     if (!gn)
         return -ENOMEM;
@@ -242,12 +245,14 @@ static int gennvm_register(struct nvm_dev *dev)
     return 1;
 err:
     gennvm_free(dev);
+    module_put(THIS_MODULE);
     return ret;
 }
 
 static void gennvm_unregister(struct nvm_dev *dev)
 {
     gennvm_free(dev);
+    module_put(THIS_MODULE);
 }
 
 static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
@@ -262,14 +267,11 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
     if (list_empty(&lun->free_list)) {
         pr_err_ratelimited("gennvm: lun %u have no free pages available",
                             lun->vlun.id);
-        spin_unlock(&vlun->lock);
         goto out;
     }
 
-    while (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks) {
-        spin_unlock(&vlun->lock);
+    if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
         goto out;
-    }
 
     blk = list_first_entry(&lun->free_list, struct nvm_block, list);
     list_move_tail(&blk->list, &lun->used_list);
@@ -278,8 +280,8 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
     lun->vlun.nr_free_blocks--;
     lun->vlun.nr_inuse_blocks++;
 
-    spin_unlock(&vlun->lock);
 out:
+    spin_unlock(&vlun->lock);
     return blk;
 }
 
@@ -349,7 +351,7 @@ static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
     gennvm_generic_to_addr_mode(dev, rqd);
 
     rqd->dev = dev;
-    return dev->ops->submit_io(dev->q, rqd);
+    return dev->ops->submit_io(dev, rqd);
 }
 
 static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
@@ -385,7 +387,7 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
     if (!dev->ops->set_bb_tbl)
         return;
 
-    if (dev->ops->set_bb_tbl(dev->q, rqd, 1))
+    if (dev->ops->set_bb_tbl(dev, rqd, 1))
         return;
 
     gennvm_addr_to_generic_mode(dev, rqd);
@@ -453,7 +455,7 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
 
     gennvm_generic_to_addr_mode(dev, &rqd);
 
-    ret = dev->ops->erase_block(dev->q, &rqd);
+    ret = dev->ops->erase_block(dev, &rqd);
 
     if (plane_cnt)
         nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
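Note: the gennvm_get_blk() refactor replaces several per-branch spin_unlock() calls with a single unlock at the out: label, so every exit path drops the lock exactly once. A compilable sketch of the pattern, using a pthread spinlock in place of the kernel primitive (build with -pthread):

/* Sketch of the single-exit unlock refactor: every early exit jumps to one
 * label that drops the lock exactly once. The block bookkeeping is a stand-in. */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t lock;
static int nr_free_blocks;      /* 0: simulate an exhausted LUN */

static int get_blk(int is_gc, int reserved)
{
    int blk = -1;

    pthread_spin_lock(&lock);
    if (nr_free_blocks == 0)
        goto out;               /* was: an unlock here, then goto */
    if (!is_gc && nr_free_blocks < reserved)
        goto out;               /* was: a second unlock site */

    blk = --nr_free_blocks;
out:
    pthread_spin_unlock(&lock); /* one unlock covers every path */
    return blk;
}

int main(void)
{
    pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
    printf("blk = %d\n", get_blk(0, 4));
    pthread_spin_destroy(&lock);
    return 0;
}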
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -182,7 +182,7 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
     struct nvm_block *blk;
     struct rrpc_block *rblk;
 
-    blk = nvm_get_blk(rrpc->dev, rlun->parent, 0);
+    blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
     if (!blk)
         return NULL;
 
@@ -202,6 +202,20 @@ static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
     nvm_put_blk(rrpc->dev, rblk->parent);
 }
 
+static void rrpc_put_blks(struct rrpc *rrpc)
+{
+    struct rrpc_lun *rlun;
+    int i;
+
+    for (i = 0; i < rrpc->nr_luns; i++) {
+        rlun = &rrpc->luns[i];
+        if (rlun->cur)
+            rrpc_put_blk(rrpc, rlun->cur);
+        if (rlun->gc_cur)
+            rrpc_put_blk(rrpc, rlun->gc_cur);
+    }
+}
+
 static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
 {
     int next = atomic_inc_return(&rrpc->next_lun);
@@ -1002,7 +1016,7 @@ static int rrpc_map_init(struct rrpc *rrpc)
         return 0;
 
     /* Bring up the mapping table from device */
-    ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
+    ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
                     rrpc_l2p_update, rrpc);
     if (ret) {
         pr_err("nvm: rrpc: could not read L2P table.\n");
@@ -1224,18 +1238,21 @@ static int rrpc_luns_configure(struct rrpc *rrpc)
 
         rblk = rrpc_get_blk(rrpc, rlun, 0);
         if (!rblk)
-            return -EINVAL;
+            goto err;
 
         rrpc_set_lun_cur(rlun, rblk);
 
         /* Emergency gc block */
         rblk = rrpc_get_blk(rrpc, rlun, 1);
         if (!rblk)
-            return -EINVAL;
+            goto err;
         rlun->gc_cur = rblk;
     }
 
     return 0;
+err:
+    rrpc_put_blks(rrpc);
+    return -EINVAL;
 }
 
 static struct nvm_tgt_type tt_rrpc;
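Note: the rrpc_luns_configure() fix stops leaking blocks. Instead of returning as soon as one LUN fails, the error path now walks every LUN and puts back any block already taken. A user-space sketch of that cleanup-on-partial-failure shape, with stand-in types:

/* Sketch of cleanup on partial failure: put back every block already taken
 * when any LUN fails to configure. Types are illustrative stand-ins. */
#include <stdio.h>
#include <stdlib.h>

struct lun { void *cur, *gc_cur; };

static void put_blk(void *blk) { free(blk); }

static void put_blks(struct lun *luns, int nr)
{
    for (int i = 0; i < nr; i++) {
        if (luns[i].cur)
            put_blk(luns[i].cur);
        if (luns[i].gc_cur)
            put_blk(luns[i].gc_cur);
    }
}

static int configure(struct lun *luns, int nr)
{
    for (int i = 0; i < nr; i++) {
        luns[i].cur = malloc(1);
        if (!luns[i].cur)
            goto err;
        /* simulate the emergency-gc block failing on the last LUN */
        luns[i].gc_cur = (i < nr - 1) ? malloc(1) : NULL;
        if (!luns[i].gc_cur)
            goto err;
    }
    return 0;
err:
    put_blks(luns, nr);     /* was: bare return, leaking earlier blocks */
    return -1;
}

int main(void)
{
    struct lun luns[4] = { { NULL, NULL } };

    printf("configure: %d\n", configure(luns, 4));
    return 0;
}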
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -271,9 +271,9 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
     return 0;
 }
 
-static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
+static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
 {
-    struct nvme_ns *ns = q->queuedata;
+    struct nvme_ns *ns = nvmdev->q->queuedata;
     struct nvme_dev *dev = ns->dev;
     struct nvme_nvm_id *nvme_nvm_id;
     struct nvme_nvm_command c = {};
@@ -308,10 +308,10 @@ out:
     return ret;
 }
 
-static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
+static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
                 nvm_l2p_update_fn *update_l2p, void *priv)
 {
-    struct nvme_ns *ns = q->queuedata;
+    struct nvme_ns *ns = nvmdev->q->queuedata;
     struct nvme_dev *dev = ns->dev;
     struct nvme_nvm_command c = {};
     u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
@@ -415,10 +415,10 @@ out:
     return ret;
 }
 
-static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd,
+static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
                                 int type)
 {
-    struct nvme_ns *ns = q->queuedata;
+    struct nvme_ns *ns = nvmdev->q->queuedata;
     struct nvme_dev *dev = ns->dev;
     struct nvme_nvm_command c = {};
     int ret = 0;
@@ -455,7 +455,7 @@ static void nvme_nvm_end_io(struct request *rq, int error)
     struct nvm_rq *rqd = rq->end_io_data;
     struct nvm_dev *dev = rqd->dev;
 
-    if (dev->mt->end_io(rqd, error))
+    if (dev->mt && dev->mt->end_io(rqd, error))
         pr_err("nvme: err status: %x result: %lx\n",
                 rq->errors, (unsigned long)rq->special);
 
@@ -463,8 +463,9 @@ static void nvme_nvm_end_io(struct request *rq, int error)
     blk_mq_free_request(rq);
 }
 
-static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
+static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
+    struct request_queue *q = dev->q;
     struct nvme_ns *ns = q->queuedata;
     struct request *rq;
     struct bio *bio = rqd->bio;
@@ -502,8 +503,9 @@ static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
     return 0;
 }
 
-static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
+static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
+    struct request_queue *q = dev->q;
     struct nvme_ns *ns = q->queuedata;
     struct nvme_nvm_command c = {};
 
@@ -515,9 +517,9 @@ static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
     return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
 }
 
-static void *nvme_nvm_create_dma_pool(struct request_queue *q, char *name)
+static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
 {
-    struct nvme_ns *ns = q->queuedata;
+    struct nvme_ns *ns = nvmdev->q->queuedata;
     struct nvme_dev *dev = ns->dev;
 
     return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
@@ -530,7 +532,7 @@ static void nvme_nvm_destroy_dma_pool(void *pool)
     dma_pool_destroy(dma_pool);
 }
 
-static void *nvme_nvm_dev_dma_alloc(struct request_queue *q, void *pool,
+static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                     gfp_t mem_flags, dma_addr_t *dma_handler)
 {
     return dma_pool_alloc(pool, mem_flags, dma_handler);
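Note: the nvme_nvm_end_io() change guards an optional callback. A request can complete for a device that never had a media manager attached, so dev->mt must be NULL-checked before calling through it. A minimal sketch of the guard, with illustrative types:

/* Sketch of the dev->mt guard added above: the media manager pointer is
 * optional, so the callback is NULL-checked first. Types are stand-ins. */
#include <stdio.h>

struct media_mgr {
    int (*end_io)(void *rqd, int error);
};

struct dev {
    struct media_mgr *mt;   /* may be NULL: no manager claimed the device */
};

static void end_io(struct dev *dev, void *rqd, int error)
{
    /* was: dev->mt->end_io(rqd, error) -- a NULL deref when mt is unset */
    if (dev->mt && dev->mt->end_io(rqd, error))
        fprintf(stderr, "err status on completion\n");
}

int main(void)
{
    struct dev d = { .mt = NULL };

    end_io(&d, NULL, 0);    /* safe no-op with the guard */
    printf("completion handled\n");
    return 0;
}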
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1523,11 +1523,14 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
         WARN_ON_ONCE(bdev->bd_holders);
         sync_blockdev(bdev);
         kill_bdev(bdev);
+
+        bdev_write_inode(bdev);
         /*
-         * ->release can cause the queue to disappear, so flush all
-         * dirty data before.
+         * Detaching bdev inode from its wb in __destroy_inode()
+         * is too late: the queue which embeds its bdi (along with
+         * root wb) can be gone as soon as we put_disk() below.
          */
-        bdev_write_inode(bdev);
+        inode_detach_wb(bdev->bd_inode);
     }
     if (bdev->bd_contains == bdev) {
         if (disk->fops->release)
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -50,9 +50,16 @@ enum {
     NVM_IO_DUAL_ACCESS = 0x1,
     NVM_IO_QUAD_ACCESS = 0x2,
 
+    /* NAND Access Modes */
     NVM_IO_SUSPEND = 0x80,
     NVM_IO_SLC_MODE = 0x100,
     NVM_IO_SCRAMBLE_DISABLE = 0x200,
+
+    /* Block Types */
+    NVM_BLK_T_FREE = 0x0,
+    NVM_BLK_T_BAD = 0x1,
+    NVM_BLK_T_DEV = 0x2,
+    NVM_BLK_T_HOST = 0x4,
 };
 
 struct nvm_id_group {
@@ -176,17 +183,17 @@ struct nvm_block;
 
 typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
 typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
-typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
-typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
+typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
+typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
                 nvm_l2p_update_fn *, void *);
 typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
                 nvm_bb_update_fn *, void *);
-typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
-typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
-typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *);
-typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *);
+typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
+typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
 typedef void (nvm_destroy_dma_pool_fn)(void *);
-typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t,
+typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
                             dma_addr_t *);
 typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
 
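Note: the typedef changes above capture the theme of the series ("lightnvm: replace req queue with nvmdev for lld"). The lld ops take a struct nvm_dev * instead of a raw struct request_queue *, and callees that still need the queue derive it from the device (q = dev->q). A small sketch of that signature migration; all names here are illustrative stand-ins:

/* Sketch of the signature migration: the ops table takes the device handle,
 * which embeds the request queue, instead of the raw queue. */
#include <stdio.h>

struct request_queue { const char *name; };

struct nvm_dev_model {
    struct request_queue *q;    /* still reachable when a callee needs it */
    int id;
};

/* new-style op: takes the device, derives the queue itself */
typedef int (submit_io_fn)(struct nvm_dev_model *dev, void *rqd);

static int submit_io(struct nvm_dev_model *dev, void *rqd)
{
    struct request_queue *q = dev->q;   /* mirrors 'q = dev->q' in the diff */

    (void)rqd;
    printf("submit on dev %d via queue %s\n", dev->id, q->name);
    return 0;
}

int main(void)
{
    struct request_queue q = { "nullb0" };
    struct nvm_dev_model dev = { &q, 0 };
    submit_io_fn *fn = submit_io;

    return fn(&dev, NULL);
}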