Diffstat (limited to 'drivers/mtd/mtdcore.c')
-rw-r--r--	drivers/mtd/mtdcore.c	419
1 file changed, 382 insertions, 37 deletions
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9a9ce71a71f..d201feeb3ca 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -36,11 +36,13 @@
 #include <linux/idr.h>
 #include <linux/backing-dev.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 
 #include "mtdcore.h"
+
 /*
  * backing device capabilities for non-mappable devices (such as NAND flash)
  * - permits private mappings, copies are taken of the data
@@ -96,18 +98,14 @@ EXPORT_SYMBOL_GPL(__mtd_next_device);
 
 static LIST_HEAD(mtd_notifiers);
 
-#if defined(CONFIG_MTD_CHAR) || defined(CONFIG_MTD_CHAR_MODULE)
 #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
-#else
-#define MTD_DEVT(index) 0
-#endif
 
 /* REVISIT once MTD uses the driver model better, whoever allocates
  * the mtd_info will probably want to use the release() hook...
  */
 static void mtd_release(struct device *dev)
 {
-	struct mtd_info *mtd = dev_get_drvdata(dev);
+	struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
 	dev_t index = MTD_DEVT(mtd->index);
 
 	/* remove /dev/mtdXro node if needed */
@@ -126,7 +124,7 @@ static int mtd_cls_resume(struct device *dev)
 {
 	struct mtd_info *mtd = dev_get_drvdata(dev);
 
-	if (mtd && mtd->resume)
+	if (mtd)
 		mtd_resume(mtd);
 	return 0;
 }
@@ -159,6 +157,9 @@ static ssize_t mtd_type_show(struct device *dev,
 	case MTD_UBIVOLUME:
 		type = "ubi";
 		break;
+	case MTD_MLCNANDFLASH:
+		type = "mlc-nand";
+		break;
 	default:
 		type = "unknown";
 	}
@@ -250,6 +251,53 @@ static ssize_t mtd_name_show(struct device *dev,
 }
 static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
 
+static ssize_t mtd_ecc_strength_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
+}
+static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
+
+static ssize_t mtd_bitflip_threshold_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
+}
+
+static ssize_t mtd_bitflip_threshold_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+	unsigned int bitflip_threshold;
+	int retval;
+
+	retval = kstrtouint(buf, 0, &bitflip_threshold);
+	if (retval)
+		return retval;
+
+	mtd->bitflip_threshold = bitflip_threshold;
+	return count;
+}
+static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
+		   mtd_bitflip_threshold_show,
+		   mtd_bitflip_threshold_store);
+
+static ssize_t mtd_ecc_step_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
+
+}
+static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
+
 static struct attribute *mtd_attrs[] = {
 	&dev_attr_type.attr,
 	&dev_attr_flags.attr,
@@ -260,17 +308,12 @@ static struct attribute *mtd_attrs[] = {
 	&dev_attr_oobsize.attr,
 	&dev_attr_numeraseregions.attr,
 	&dev_attr_name.attr,
+	&dev_attr_ecc_strength.attr,
+	&dev_attr_ecc_step_size.attr,
+	&dev_attr_bitflip_threshold.attr,
 	NULL,
 };
-
-static struct attribute_group mtd_group = {
-	.attrs = mtd_attrs,
-};
-
-static const struct attribute_group *mtd_groups[] = {
-	&mtd_group,
-	NULL,
-};
+ATTRIBUTE_GROUPS(mtd);
 
 static struct device_type mtd_devtype = {
 	.name = "mtd",
@@ -310,18 +353,17 @@ int add_mtd_device(struct mtd_info *mtd)
 	BUG_ON(mtd->writesize == 0);
 	mutex_lock(&mtd_table_mutex);
 
-	do {
-		if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
-			goto fail_locked;
-		error = idr_get_new(&mtd_idr, mtd, &i);
-	} while (error == -EAGAIN);
-
-	if (error)
+	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
+	if (i < 0)
 		goto fail_locked;
 
 	mtd->index = i;
 	mtd->usecount = 0;
 
+	/* default value if not set by driver */
+	if (mtd->bitflip_threshold == 0)
+		mtd->bitflip_threshold = mtd->ecc_strength;
+
 	if (is_power_of_2(mtd->erasesize))
 		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
 	else
@@ -454,7 +496,7 @@ out_error:
  *
  * Returns zero in case of success and a negative error code in case of failure.
  */
-int mtd_device_parse_register(struct mtd_info *mtd, const char **types,
+int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
 			      struct mtd_part_parser_data *parser_data,
 			      const struct mtd_partition *parts,
 			      int nr_parts)
@@ -610,8 +652,8 @@ int __get_mtd_device(struct mtd_info *mtd)
 	if (!try_module_get(mtd->owner))
 		return -ENODEV;
 
-	if (mtd->get_device) {
-		err = mtd->get_device(mtd);
+	if (mtd->_get_device) {
+		err = mtd->_get_device(mtd);
 
 		if (err) {
 			module_put(mtd->owner);
@@ -675,14 +717,310 @@ void __put_mtd_device(struct mtd_info *mtd)
 	--mtd->usecount;
 	BUG_ON(mtd->usecount < 0);
 
-	if (mtd->put_device)
-		mtd->put_device(mtd);
+	if (mtd->_put_device)
+		mtd->_put_device(mtd);
 
 	module_put(mtd->owner);
 }
 EXPORT_SYMBOL_GPL(__put_mtd_device);
 
 /*
+ * Erase is an asynchronous operation. Device drivers are supposed
+ * to call instr->callback() whenever the operation completes, even
+ * if it completes with a failure.
+ * Callers are supposed to pass a callback function and wait for it
+ * to be called before writing to the block.
+ */
+int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
+		return -EINVAL;
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+	if (!instr->len) {
+		instr->state = MTD_ERASE_DONE;
+		mtd_erase_callback(instr);
+		return 0;
+	}
+	return mtd->_erase(mtd, instr);
+}
+EXPORT_SYMBOL_GPL(mtd_erase);
+
+/*
+ * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
+ */
+int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+	      void **virt, resource_size_t *phys)
+{
+	*retlen = 0;
+	*virt = NULL;
+	if (phys)
+		*phys = 0;
+	if (!mtd->_point)
+		return -EOPNOTSUPP;
+	if (from < 0 || from > mtd->size || len > mtd->size - from)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_point(mtd, from, len, retlen, virt, phys);
+}
+EXPORT_SYMBOL_GPL(mtd_point);
+
+/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+	if (!mtd->_point)
+		return -EOPNOTSUPP;
+	if (from < 0 || from > mtd->size || len > mtd->size - from)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_unpoint(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unpoint);
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
+				    unsigned long offset, unsigned long flags)
+{
+	if (!mtd->_get_unmapped_area)
+		return -EOPNOTSUPP;
+	if (offset > mtd->size || len > mtd->size - offset)
+		return -EINVAL;
+	return mtd->_get_unmapped_area(mtd, len, offset, flags);
+}
+EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
+
+int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+	     u_char *buf)
+{
+	int ret_code;
+	*retlen = 0;
+	if (from < 0 || from > mtd->size || len > mtd->size - from)
+		return -EINVAL;
+	if (!len)
+		return 0;
+
+	/*
+	 * In the absence of an error, drivers return a non-negative integer
+	 * representing the maximum number of bitflips that were corrected on
+	 * any one ecc region (if applicable; zero otherwise).
+	 */
+	ret_code = mtd->_read(mtd, from, len, retlen, buf);
+	if (unlikely(ret_code < 0))
+		return ret_code;
+	if (mtd->ecc_strength == 0)
+		return 0;	/* device lacks ecc */
+	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+}
+EXPORT_SYMBOL_GPL(mtd_read);
+
+int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+	      const u_char *buf)
+{
+	*retlen = 0;
+	if (to < 0 || to > mtd->size || len > mtd->size - to)
+		return -EINVAL;
+	if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	if (!len)
+		return 0;
+	return mtd->_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_write);
+
+/*
+ * In blackbox flight recorder like scenarios we want to make successful writes
+ * in interrupt context. panic_write() is only intended to be called when its
+ * known the kernel is about to panic and we need the write to succeed. Since
+ * the kernel is not going to be running for much longer, this function can
+ * break locks and delay to ensure the write succeeds (but not sleep).
+ */
+int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+		    const u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->_panic_write)
+		return -EOPNOTSUPP;
+	if (to < 0 || to > mtd->size || len > mtd->size - to)
+		return -EINVAL;
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	if (!len)
+		return 0;
+	return mtd->_panic_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_panic_write);
+
+int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+	int ret_code;
+	ops->retlen = ops->oobretlen = 0;
+	if (!mtd->_read_oob)
+		return -EOPNOTSUPP;
+	/*
+	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
+	 * similar to mtd->_read(), returning a non-negative integer
+	 * representing max bitflips. In other cases, mtd->_read_oob() may
+	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
+	 */
+	ret_code = mtd->_read_oob(mtd, from, ops);
+	if (unlikely(ret_code < 0))
+		return ret_code;
+	if (mtd->ecc_strength == 0)
+		return 0;	/* device lacks ecc */
+	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+}
+EXPORT_SYMBOL_GPL(mtd_read_oob);
+
+/*
+ * Method to access the protection register area, present in some flash
+ * devices. The user data is one time programmable but the factory data is read
+ * only.
+ */
+int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+			   struct otp_info *buf)
+{
+	if (!mtd->_get_fact_prot_info)
+		return -EOPNOTSUPP;
+	if (!len)
+		return 0;
+	return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
+
+int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+			   size_t *retlen, u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->_read_fact_prot_reg)
+		return -EOPNOTSUPP;
+	if (!len)
+		return 0;
+	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
+
+int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+			   struct otp_info *buf)
+{
+	if (!mtd->_get_user_prot_info)
+		return -EOPNOTSUPP;
+	if (!len)
+		return 0;
+	return mtd->_get_user_prot_info(mtd, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
+
+int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+			   size_t *retlen, u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->_read_user_prot_reg)
+		return -EOPNOTSUPP;
+	if (!len)
+		return 0;
+	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
+
+int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
+			    size_t *retlen, u_char *buf)
+{
+	int ret;
+
+	*retlen = 0;
+	if (!mtd->_write_user_prot_reg)
+		return -EOPNOTSUPP;
+	if (!len)
+		return 0;
+	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
+	if (ret)
+		return ret;
+
+	/*
+	 * If no data could be written at all, we are out of memory and
+	 * must return -ENOSPC.
+	 */
+	return (*retlen) ? 0 : -ENOSPC;
+}
+EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
+
+int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
+{
+	if (!mtd->_lock_user_prot_reg)
+		return -EOPNOTSUPP;
+	if (!len)
+		return 0;
+	return mtd->_lock_user_prot_reg(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
+
+/* Chip-supported device locking */
+int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	if (!mtd->_lock)
+		return -EOPNOTSUPP;
+	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_lock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock);
+
+int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	if (!mtd->_unlock)
+		return -EOPNOTSUPP;
+	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_unlock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unlock);
+
+int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	if (!mtd->_is_locked)
+		return -EOPNOTSUPP;
+	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+		return -EINVAL;
+	if (!len)
+		return 0;
+	return mtd->_is_locked(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_is_locked);
+
+int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+	if (!mtd->_block_isbad)
+		return 0;
+	if (ofs < 0 || ofs > mtd->size)
+		return -EINVAL;
+	return mtd->_block_isbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_isbad);
+
+int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	if (!mtd->_block_markbad)
+		return -EOPNOTSUPP;
+	if (ofs < 0 || ofs > mtd->size)
+		return -EINVAL;
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	return mtd->_block_markbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_markbad);
+
+/*
  * default_mtd_writev - the default writev method
  * @mtd: mtd device description object pointer
  * @vecs: the vectors to write
@@ -729,9 +1067,11 @@ int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
 	       unsigned long count, loff_t to, size_t *retlen)
 {
 	*retlen = 0;
-	if (!mtd->writev)
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	if (!mtd->_writev)
 		return default_mtd_writev(mtd, vecs, count, to, retlen);
-	return mtd->writev(mtd, vecs, count, to, retlen);
+	return mtd->_writev(mtd, vecs, count, to, retlen);
 }
 EXPORT_SYMBOL_GPL(mtd_writev);
 
@@ -790,8 +1130,6 @@ EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
 /*====================================================================*/
 /* Support for /proc/mtd */
 
-static struct proc_dir_entry *proc_mtd;
-
 static int mtd_proc_show(struct seq_file *m, void *v)
 {
 	struct mtd_info *mtd;
@@ -829,7 +1167,7 @@ static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
 
 	ret = bdi_init(bdi);
 	if (!ret)
-		ret = bdi_register(bdi, NULL, name);
+		ret = bdi_register(bdi, NULL, "%s", name);
 
 	if (ret)
 		bdi_destroy(bdi);
@@ -837,6 +1175,8 @@ static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
 	return ret;
 }
 
+static struct proc_dir_entry *proc_mtd;
+
 static int __init init_mtd(void)
 {
 	int ret;
@@ -857,11 +1197,17 @@ static int __init init_mtd(void)
 	if (ret)
 		goto err_bdi3;
 
-#ifdef CONFIG_PROC_FS
 	proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
-#endif /* CONFIG_PROC_FS */
+
+	ret = init_mtdchar();
+	if (ret)
+		goto out_procfs;
+
 	return 0;
 
+out_procfs:
+	if (proc_mtd)
+		remove_proc_entry("mtd", NULL);
 err_bdi3:
 	bdi_destroy(&mtd_bdi_ro_mappable);
 err_bdi2:
@@ -875,10 +1221,9 @@ err_reg:
 
 static void __exit cleanup_mtd(void)
 {
-#ifdef CONFIG_PROC_FS
+	cleanup_mtdchar();
 	if (proc_mtd)
-		remove_proc_entry( "mtd", NULL);
-#endif /* CONFIG_PROC_FS */
+		remove_proc_entry("mtd", NULL);
 	class_unregister(&mtd_class);
 	bdi_destroy(&mtd_bdi_unmappable);
 	bdi_destroy(&mtd_bdi_ro_mappable);
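
The add_mtd_device() hunk above also drops the old idr_pre_get()/idr_get_new() retry loop in favour of idr_alloc(), which allocates the ID and stores the pointer in one call. A minimal sketch of the same conversion pattern, using a hypothetical my_idr/my_lock rather than the mtd_idr/mtd_table_mutex from this file:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

static DEFINE_IDR(my_idr);
static DEFINE_MUTEX(my_lock);

/* Returns the newly allocated id (>= 0) or a negative errno. */
static int example_register(void *object)
{
	int id;

	mutex_lock(&my_lock);
	/* start = 0, end = 0: any non-negative id, as in add_mtd_device(). */
	id = idr_alloc(&my_idr, object, 0, 0, GFP_KERNEL);
	mutex_unlock(&my_lock);

	return id;
}

Unlike the old two-step API, a negative return here already encodes the error, which is why the patch can replace the error/-EAGAIN bookkeeping with a single "if (i < 0)" check.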
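The new mtd_read()/mtd_read_oob() wrappers convert the driver's "maximum bitflips corrected" return value into -EUCLEAN once it reaches mtd->bitflip_threshold, while still filling the caller's buffer with corrected data. A minimal sketch of a hypothetical in-kernel caller (example_read_page and its pr_info message are illustrative, not part of this patch) that treats -EUCLEAN as "data OK, but the block should be scrubbed":

#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/printk.h>

/* Illustrative only: read 'len' bytes and downgrade -EUCLEAN to a hint. */
static int example_read_page(struct mtd_info *mtd, loff_t from, size_t len)
{
	size_t retlen;
	int err;
	u_char *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	err = mtd_read(mtd, from, len, &retlen, buf);
	if (err == -EUCLEAN) {
		/* Data was corrected; consider scrubbing this eraseblock. */
		pr_info("mtd%d: bitflips at 0x%llx reached bitflip_threshold\n",
			mtd->index, (unsigned long long)from);
		err = 0;
	}

	kfree(buf);
	return err;
}

UBI, for example, uses -EUCLEAN as a scrubbing trigger rather than a hard error; the read itself has succeeded.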
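The new sysfs attributes (ecc_strength, ecc_step_size and the writable bitflip_threshold) are exposed per device, normally under /sys/class/mtd/mtdX/. A small userspace sketch, assuming an mtd0 device exists, that reads the advertised ECC strength:

#include <stdio.h>

int main(void)
{
	unsigned int strength;
	FILE *f = fopen("/sys/class/mtd/mtd0/ecc_strength", "r");

	if (!f) {
		perror("ecc_strength");
		return 1;
	}
	if (fscanf(f, "%u", &strength) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	printf("mtd0 ECC can correct up to %u bitflips per step\n", strength);
	return 0;
}

Writing a smaller value into bitflip_threshold makes mtd_read()/mtd_read_oob() report -EUCLEAN earlier; the default set in add_mtd_device() is the device's ecc_strength when the driver does not provide one.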
