Commit a840b56b authored by Linus Torvalds

Merge tag 'upstream-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs

Pull UBI and UBIFS updates from Richard Weinberger:

 - A new interface for UBI to deal better with read disturb

 - Reject unsupported ioctl flags in UBIFS (xfstests found it)

* tag 'upstream-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs:
  ubi: wl: Silence uninitialized variable warning
  ubifs: Reject unsupported ioctl flags explicitly
  ubi: Expose the bitrot interface
  ubi: Introduce in_pq()
parents ebc551f2 5578e48e
......@@ -974,6 +974,36 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
/* Check a specific PEB for bitflips and scrub it if needed */
case UBI_IOCRPEB:
{
int pnum;
err = get_user(pnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
err = ubi_bitflip_check(ubi, pnum, 0);
break;
}
/* Force scrubbing for a specific PEB */
case UBI_IOCSPEB:
{
int pnum;
err = get_user(pnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
err = ubi_bitflip_check(ubi, pnum, 1);
break;
}
default:
err = -ENOTTY;
break;
......
......@@ -929,6 +929,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
int ubi_is_erase_work(struct ubi_work *wrk);
void ubi_refill_pools(struct ubi_device *ubi);
int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
......
......@@ -277,6 +277,27 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
return 0;
}
/**
* in_pq - check if a wear-leveling entry is present in the protection queue.
* @ubi: UBI device description object
* @e: the wear-leveling entry to check
*
* This function returns non-zero if @e is in the protection queue and zero
* if it is not.
*/
static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
{
struct ubi_wl_entry *p;
int i;
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
list_for_each_entry(p, &ubi->pq[i], u.list)
if (p == e)
return 1;
return 0;
}
/**
* prot_queue_add - add physical eraseblock to the protection queue.
* @ubi: UBI device description object
......@@ -1419,6 +1440,150 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
return err;
}
/**
 * scrub_possible - check whether a PEB is in a state that allows scrubbing.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry of the PEB to check
 *
 * Returns false when the PEB is already queued for scrubbing, marked
 * erroneous, or currently involved in a wear-leveling move; true otherwise.
 * Caller must hold @ubi->wl_lock so the entry's state cannot change.
 * NOTE(review): locking requirement inferred from the callers visible in
 * this file — confirm against the rest of wl.c.
 */
static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	if (in_wl_tree(e, &ubi->scrub) ||
	    in_wl_tree(e, &ubi->erroneous) ||
	    ubi->move_from == e ||
	    ubi->move_to == e)
		return false;

	return true;
}
/**
* ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
* @ubi: UBI device description object
* @pnum: the physical eraseblock to schedule
* @force: dont't read the block, assume bitflips happened and take action.
*
* This function reads the given eraseblock and checks if bitflips occured.
* In case of bitflips, the eraseblock is scheduled for scrubbing.
* If scrubbing is forced with @force, the eraseblock is not read,
* but scheduled for scrubbing right away.
*
* Returns:
* %EINVAL, PEB is out of range
* %ENOENT, PEB is no longer used by UBI
* %EBUSY, PEB cannot be checked now or a check is currently running on it
* %EAGAIN, bit flips happened but scrubbing is currently not possible
* %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
* %0, no bit flips detected
*/
int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
{
int err;
struct ubi_wl_entry *e;
if (pnum < 0 || pnum >= ubi->peb_count) {
err = -EINVAL;
goto out;
}
/*
* Pause all parallel work, otherwise it can happen that the
* erase worker frees a wl entry under us.
*/
down_write(&ubi->work_sem);
/*
* Make sure that the wl entry does not change state while
* inspecting it.
*/
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
if (!e) {
spin_unlock(&ubi->wl_lock);
err = -ENOENT;
goto out_resume;
}
/*
* Does it make sense to check this PEB?
*/
if (!scrub_possible(ubi, e)) {
spin_unlock(&ubi->wl_lock);
err = -EBUSY;
goto out_resume;
}
spin_unlock(&ubi->wl_lock);
if (!force) {
mutex_lock(&ubi->buf_mutex);
err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
mutex_unlock(&ubi->buf_mutex);
}
if (force || err == UBI_IO_BITFLIPS) {
/*
* Okay, bit flip happened, let's figure out what we can do.
*/
spin_lock(&ubi->wl_lock);
/*
* Recheck. We released wl_lock, UBI might have killed the
* wl entry under us.
*/
e = ubi->lookuptbl[pnum];
if (!e) {
spin_unlock(&ubi->wl_lock);
err = -ENOENT;
goto out_resume;
}
/*
* Need to re-check state
*/
if (!scrub_possible(ubi, e)) {
spin_unlock(&ubi->wl_lock);
err = -EBUSY;
goto out_resume;
}
if (in_pq(ubi, e)) {
prot_queue_del(ubi, e->pnum);
wl_tree_add(e, &ubi->scrub);
spin_unlock(&ubi->wl_lock);
err = ensure_wear_leveling(ubi, 1);
} else if (in_wl_tree(e, &ubi->used)) {
rb_erase(&e->u.rb, &ubi->used);
wl_tree_add(e, &ubi->scrub);
spin_unlock(&ubi->wl_lock);
err = ensure_wear_leveling(ubi, 1);
} else if (in_wl_tree(e, &ubi->free)) {
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
spin_unlock(&ubi->wl_lock);
/*
* This PEB is empty we can schedule it for
* erasure right away. No wear leveling needed.
*/
err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
force ? 0 : 1, true);
} else {
spin_unlock(&ubi->wl_lock);
err = -EAGAIN;
}
if (!err && !force)
err = -EUCLEAN;
} else {
err = 0;
}
out_resume:
up_write(&ubi->work_sem);
out:
return err;
}
/**
* tree_destroy - destroy an RB-tree.
* @ubi: UBI device description object
......@@ -1848,15 +2013,10 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
static int self_check_in_pq(const struct ubi_device *ubi,
struct ubi_wl_entry *e)
{
struct ubi_wl_entry *p;
int i;
if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
list_for_each_entry(p, &ubi->pq[i], u.list)
if (p == e)
if (in_pq(ubi, e))
return 0;
ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
......
......@@ -28,6 +28,11 @@
#include <linux/mount.h>
#include "ubifs.h"
/* Need to be kept consistent with checked flags in ioctl2ubifs() */
#define UBIFS_SUPPORTED_IOCTL_FLAGS \
(FS_COMPR_FL | FS_SYNC_FL | FS_APPEND_FL | \
FS_IMMUTABLE_FL | FS_DIRSYNC_FL)
/**
* ubifs_set_inode_flags - set VFS inode flags.
* @inode: VFS inode to set flags for
......@@ -169,6 +174,9 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (get_user(flags, (int __user *) arg))
return -EFAULT;
if (flags & ~UBIFS_SUPPORTED_IOCTL_FLAGS)
return -EOPNOTSUPP;
if (!S_ISDIR(inode->i_mode))
flags &= ~FS_DIRSYNC_FL;
......
......@@ -171,6 +171,11 @@
/* Re-name volumes */
#define UBI_IOCRNVOL _IOW(UBI_IOC_MAGIC, 3, struct ubi_rnvol_req)
/* Read the specified PEB and scrub it if there are bitflips */
#define UBI_IOCRPEB _IOW(UBI_IOC_MAGIC, 4, __s32)
/* Force scrubbing on the specified PEB */
#define UBI_IOCSPEB _IOW(UBI_IOC_MAGIC, 5, __s32)
/* ioctl commands of the UBI control character device */
#define UBI_CTRL_IOC_MAGIC 'o'
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment