commit 69423d99fc

MTD internal API presently uses 32-bit values to represent device size. This patch updates them to 64 bits but leaves the external API unchanged. Extending the external API is a separate issue, for several reasons. First, no one needs it at the moment. Second, whether the implementation is done with ioctls, sysfs or both is still being debated. Third, external API changes require the internal API to be accepted first.

Note that although the MTD API will be able to support 64-bit device sizes, existing drivers do not and are not required to do so, although NAND base has been updated. In general, changing from 32-bit to 64-bit values causes little or no change to the majority of the code, with the following exceptions:

- printk message formats
- division and modulus of 64-bit values
- NAND base support
- 32-bit local variables used by mtdpart and mtdconcat
- naughtily assuming one structure maps to another in the MEMERASE ioctl

Signed-off-by: Adrian Hunter <ext-adrian.hunter@nokia.com>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
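
For context, the two recurring mechanical changes the commit message lists (printk message formats, and division and modulus of 64-bit values) usually reduce to the idioms sketched below. This is an illustrative sketch only, not part of mtdchar.c or of the patch itself; mtd_print_geometry is a hypothetical helper, while printk, div_u64 and the %llu cast are the standard kernel facilities assumed here.

#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/mtd/mtd.h>

/* Hypothetical example: report the geometry of a device whose size is 64-bit. */
static void mtd_print_geometry(struct mtd_info *mtd)
{
	/* 64-bit fields need %llu plus a cast in printk format strings */
	printk(KERN_INFO "mtd%d: size %llu bytes\n",
	       mtd->index, (unsigned long long)mtd->size);

	/* plain '/' on a u64 breaks 32-bit builds; use div_u64 (or do_div) */
	printk(KERN_INFO "mtd%d: %llu erase blocks of %u bytes\n",
	       mtd->index,
	       (unsigned long long)div_u64(mtd->size, mtd->erasesize),
	       mtd->erasesize);
}
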
/*
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#include <asm/uaccess.h>

static struct class *mtd_class;

static void mtd_notify_add(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
		      NULL, "mtd%d", mtd->index);

	device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
		      NULL, "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
	device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
	.add = mtd_notify_add,
	.remove = mtd_notify_remove,
};

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	switch (orig) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_END:
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset <= mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}

static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	lock_kernel();
	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		ret = -ENODEV;
		goto out;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		ret = -EACCES;
		goto out;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;

out:
	unlock_kernel();
	return ret;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE) && mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000

static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen=0;
	size_t total_retlen=0;
	int ret=0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */

	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* Nand returns -EBADMSG on ecc errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ecc errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	char *kbuf;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */

/*======================================================================

	IOCTL calls for getting device parameters.

======================================================================*/
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

#ifdef CONFIG_HAVE_MTD_OTP
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_USER;
		break;
	default:
		ret = -EINVAL;
	case MTD_OTP_OFF:
		break;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m) -EOPNOTSUPP
#endif

static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user *ur = (struct region_info_user *) argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below fields are obsolete */
		info.ecctype = -1;
		info.eccsize = 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase;

		if(!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			struct erase_info_user einfo;

			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			if (copy_from_user(&einfo, argp,
					   sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->addr = einfo.start;
			erase->len = einfo.length;
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;
		struct mtd_oob_buf __user *user_buf = argp;
		uint32_t retlen;

		if(!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
			kfree(ops.oobbuf);
			return -EFAULT;
		}

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->write_oob(mtd, buf.start, &ops);

		if (ops.oobretlen > 0xFFFFFFFFU)
			ret = -EOVERFLOW;
		retlen = ops.oobretlen;
		if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;
		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->read_oob(mtd, buf.start, &ops);

		if (put_user(ops.oobretlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
						       ops.oobretlen))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));
		oi.eccbytes = mtd->ecclayout->eccbytes;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#ifdef CONFIG_HAVE_MTD_OTP
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}
#endif

	case ECCGETLAYOUT:
	{
		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		if (copy_to_user(argp, mtd->ecclayout,
				 sizeof(struct nand_ecclayout)))
			return -EFAULT;
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch(arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fall through to MTD_MODE_NORMAL */

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* mtd_ioctl */

static const struct file_operations mtd_fops = {
	.owner = THIS_MODULE,
	.llseek = mtd_lseek,
	.read = mtd_read,
	.write = mtd_write,
	.ioctl = mtd_ioctl,
	.open = mtd_open,
	.release = mtd_close,
};

static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtd_class = class_create(THIS_MODULE, "mtd");

	if (IS_ERR(mtd_class)) {
		printk(KERN_ERR "Error creating mtd class.\n");
		unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
		return PTR_ERR(mtd_class);
	}

	register_mtd_user(&notifier);

	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&notifier);
	class_destroy(mtd_class);
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");
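
For reference, user space drives this character device through the ioctls implemented above. The following is a minimal user-space sketch (not part of mtdchar.c) that queries the geometry with MEMGETINFO and erases the first erase block with MEMERASE; the device node path /dev/mtd0 is an assumption and error handling is abbreviated.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/mtd-user.h>

int main(void)
{
	struct mtd_info_user info;
	struct erase_info_user ei;
	int fd = open("/dev/mtd0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	/* MEMGETINFO fills in type, flags, size, erasesize, writesize, oobsize */
	if (ioctl(fd, MEMGETINFO, &info) < 0)
		return 1;
	printf("size=%u erasesize=%u\n", info.size, info.erasesize);

	/* MEMERASE requires the device to be opened read-write and blocks
	   until the erase completes (see the MEMERASE case in mtd_ioctl) */
	memset(&ei, 0, sizeof(ei));
	ei.start = 0;
	ei.length = info.erasesize;
	if (ioctl(fd, MEMERASE, &ei) < 0)
		return 1;

	close(fd);
	return 0;
}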