Commit d1de762e authored by Linus Torvalds

Merge tag 'upstream-4.16-rc1' of git://git.infradead.org/linux-ubifs

Pull UBI/UBIFS updates from Richard Weinberger:

 - use the new fscrypt APIs

 - a fix for a Fastmap issue

 - other minor bug fixes

* tag 'upstream-4.16-rc1' of git://git.infradead.org/linux-ubifs:
  ubi: block: Fix locking for idr_alloc/idr_remove
  mtd: ubi: wl: Fix error return code in ubi_wl_init()
  ubi: Fix copy/paste error in function documentation
  ubi: Fastmap: Fix typo
  ubifs: remove error message in ubifs_xattr_get
  ubi: fastmap: Erase outdated anchor PEBs during attach
  ubifs: switch to fscrypt_prepare_setattr()
  ubifs: switch to fscrypt_prepare_lookup()
  ubifs: switch to fscrypt_prepare_rename()
  ubifs: switch to fscrypt_prepare_link()
  ubifs: switch to fscrypt_file_open()
  ubi: fastmap: Clean up the initialization of pointer p
  ubi: fastmap: Use kmem_cache_free to deallocate memory
  ubi: Fix race condition between ubi volume creation and udev
  mtd: ubi: Use 'max_bad_blocks' to compute bad_peb_limit if available
  ubifs: Fix uninitialized variable in search_dh_cookie()
parents 0a4b6e2f 7f29ae9f
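
The fs/ubifs hunks below switch UBIFS over to the fscrypt_prepare_*() helpers named in the shortlog above. As a rough sketch of the pattern (illustrative only; example_link() is a made-up stand-in for the real UBIFS hooks, and the fragment assumes the usual kernel/fscrypt headers), each hook now delegates its encryption-policy checks to a single helper call instead of open-coding fscrypt_get_encryption_info()/fscrypt_has_permitted_context():

	/* Sketch of the conversion pattern, not verbatim UBIFS code. */
	static int example_link(struct dentry *old_dentry, struct inode *dir,
				struct dentry *dentry)
	{
		int err;

		/* One helper call replaces the manual "is the encryption
		 * context permitted?" check the old ubifs_link() carried. */
		err = fscrypt_prepare_link(old_dentry, dir, dentry);
		if (err)
			return err;

		/* ... filesystem-specific link handling continues here ... */
		return 0;
	}

The same shape applies to ->lookup (fscrypt_prepare_lookup), ->rename (fscrypt_prepare_rename), ->setattr (fscrypt_prepare_setattr) and ->open, which can point at fscrypt_file_open() directly, as the fs/ubifs/dir.c and fs/ubifs/file.c hunks show.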
drivers/mtd/ubi/block.c
@@ -99,6 +99,8 @@ struct ubiblock {
/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_IDR(ubiblock_minor_idr);
/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;
@@ -351,8 +353,6 @@ static const struct blk_mq_ops ubiblock_mq_ops = {
.init_request = ubiblock_init_request,
};
static DEFINE_IDR(ubiblock_minor_idr);
int ubiblock_create(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
@@ -365,14 +365,15 @@ int ubiblock_create(struct ubi_volume_info *vi)
/* Check that the volume isn't already handled */
mutex_lock(&devices_mutex);
if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
mutex_unlock(&devices_mutex);
return -EEXIST;
ret = -EEXIST;
goto out_unlock;
}
mutex_unlock(&devices_mutex);
dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
if (!dev)
return -ENOMEM;
if (!dev) {
ret = -ENOMEM;
goto out_unlock;
}
mutex_init(&dev->dev_mutex);
@@ -437,14 +438,13 @@ int ubiblock_create(struct ubi_volume_info *vi)
goto out_free_queue;
}
mutex_lock(&devices_mutex);
list_add_tail(&dev->list, &ubiblock_devices);
mutex_unlock(&devices_mutex);
/* Must be the last step: anyone can call file ops from now on */
add_disk(dev->gd);
dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
dev->ubi_num, dev->vol_id, vi->name);
mutex_unlock(&devices_mutex);
return 0;
out_free_queue:
@@ -457,6 +457,8 @@ int ubiblock_create(struct ubi_volume_info *vi)
put_disk(dev->gd);
out_free_dev:
kfree(dev);
out_unlock:
mutex_unlock(&devices_mutex);
return ret;
}
@@ -478,30 +480,36 @@ static void ubiblock_cleanup(struct ubiblock *dev)
int ubiblock_remove(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
int ret;
mutex_lock(&devices_mutex);
dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
if (!dev) {
mutex_unlock(&devices_mutex);
return -ENODEV;
ret = -ENODEV;
goto out_unlock;
}
/* Found a device, let's lock it so we can check if it's busy */
mutex_lock(&dev->dev_mutex);
if (dev->refcnt > 0) {
mutex_unlock(&dev->dev_mutex);
mutex_unlock(&devices_mutex);
return -EBUSY;
ret = -EBUSY;
goto out_unlock_dev;
}
/* Remove from device list */
list_del(&dev->list);
mutex_unlock(&devices_mutex);
ubiblock_cleanup(dev);
mutex_unlock(&dev->dev_mutex);
mutex_unlock(&devices_mutex);
kfree(dev);
return 0;
out_unlock_dev:
mutex_unlock(&dev->dev_mutex);
out_unlock:
mutex_unlock(&devices_mutex);
return ret;
}
static int ubiblock_resize(struct ubi_volume_info *vi)
@@ -630,6 +638,7 @@ static void ubiblock_remove_all(void)
struct ubiblock *next;
struct ubiblock *dev;
mutex_lock(&devices_mutex);
list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
/* The module is being forcefully removed */
WARN_ON(dev->desc);
@@ -638,6 +647,7 @@ static void ubiblock_remove_all(void)
ubiblock_cleanup(dev);
kfree(dev);
}
mutex_unlock(&devices_mutex);
}
int __init ubiblock_init(void)
drivers/mtd/ubi/build.c
@@ -535,8 +535,17 @@ static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
int limit, device_pebs;
uint64_t device_size;
if (!max_beb_per1024)
return 0;
if (!max_beb_per1024) {
/*
* Since max_beb_per1024 has not been set by the user in either
* the cmdline or Kconfig, use mtd_max_bad_blocks to set the
* limit if it is supported by the device.
*/
limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
if (limit < 0)
return 0;
return limit;
}
/*
* Here we are using size of the entire flash chip and
drivers/mtd/ubi/eba.c
@@ -384,7 +384,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
}
/**
* leb_write_lock - lock logical eraseblock for writing.
* leb_write_trylock - try to lock logical eraseblock for writing.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
drivers/mtd/ubi/fastmap-wl.c
@@ -66,7 +66,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
}
}
static int anchor_pebs_avalible(struct rb_root *root)
static int anchor_pebs_available(struct rb_root *root)
{
struct rb_node *p;
struct ubi_wl_entry *e;
drivers/mtd/ubi/fastmap.c
@@ -214,9 +214,8 @@ static void assign_aeb_to_av(struct ubi_attach_info *ai,
struct ubi_ainf_volume *av)
{
struct ubi_ainf_peb *tmp_aeb;
struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
struct rb_node **p = &av->root.rb_node, *parent = NULL;
p = &av->root.rb_node;
while (*p) {
parent = *p;
@@ -1063,7 +1062,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e) {
while (i--)
kfree(fm->e[i]);
kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
ret = -ENOMEM;
goto free_hdr;
drivers/mtd/ubi/vmt.c
@@ -270,6 +270,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->last_eb_bytes = vol->usable_leb_size;
}
/* Make volume "available" before it becomes accessible via sysfs */
spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = vol;
ubi->vol_count += 1;
spin_unlock(&ubi->volumes_lock);
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
@@ -298,11 +304,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (err)
goto out_sysfs;
spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = vol;
ubi->vol_count += 1;
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
self_check_volumes(ubi);
return err;
@@ -315,6 +316,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
*/
cdev_device_del(&vol->cdev, &vol->dev);
out_mapping:
spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = NULL;
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
drivers/mtd/ubi/wl.c
@@ -692,7 +692,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
#ifdef CONFIG_MTD_UBI_FASTMAP
/* Check whether we need to produce an anchor PEB */
if (!anchor)
anchor = !anchor_pebs_avalible(&ubi->free);
anchor = !anchor_pebs_available(&ubi->free);
if (anchor) {
e1 = find_anchor_wl_entry(&ubi->used);
@@ -1528,6 +1528,46 @@ static void shutdown_work(struct ubi_device *ubi)
}
}
/**
* erase_aeb - erase a PEB given in UBI attach info PEB
* @ubi: UBI device description object
* @aeb: UBI attach info PEB
* @sync: If true, erase synchronously. Otherwise schedule for erasure
*/
static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
{
struct ubi_wl_entry *e;
int err;
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
return -ENOMEM;
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
if (sync) {
err = sync_erase(ubi, e, false);
if (err)
goto out_free;
wl_tree_add(e, &ubi->free);
ubi->free_count++;
} else {
err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
if (err)
goto out_free;
}
return 0;
out_free:
wl_entry_destroy(ubi, e);
return err;
}
/**
* ubi_wl_init - initialize the WL sub-system using attaching information.
* @ubi: UBI device description object
@@ -1566,17 +1606,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
wl_entry_destroy(ubi, e);
err = erase_aeb(ubi, aeb, false);
if (err)
goto out_free;
}
found_pebs++;
}
@@ -1585,8 +1617,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
if (!e) {
err = -ENOMEM;
goto out_free;
}
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1605,8 +1639,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
if (!e) {
err = -ENOMEM;
goto out_free;
}
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1635,6 +1671,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi->lookuptbl[e->pnum] = e;
} else {
bool sync = false;
/*
* Usually old Fastmap PEBs are scheduled for erasure
* and we don't have to care about them but if we face
@@ -1644,18 +1682,21 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (ubi->lookuptbl[aeb->pnum])
continue;
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
/*
* The fastmap update code might not find a free PEB for
* writing the fastmap anchor to and then reuses the
* current fastmap anchor PEB. When this PEB gets erased
* and a power cut happens before it is written again we
* must make sure that the fastmap attach code doesn't
* find any outdated fastmap anchors, hence we erase the
* outdated fastmap anchor PEBs synchronously here.
*/
if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
sync = true;
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
wl_entry_destroy(ubi, e);
err = erase_aeb(ubi, aeb, sync);
if (err)
goto out_free;
}
}
found_pebs++;
drivers/mtd/ubi/wl.h
@@ -2,7 +2,7 @@
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
static int anchor_pebs_avalible(struct rb_root *root);
static int anchor_pebs_available(struct rb_root *root);
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
fs/ubifs/dir.c
@@ -220,20 +220,9 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
dbg_gen("'%pd' in dir ino %lu", dentry, dir->i_ino);
if (ubifs_crypt_is_encrypted(dir)) {
err = fscrypt_get_encryption_info(dir);
/*
* DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
* created while the directory was encrypted and we
* have access to the key.
*/
if (fscrypt_has_encryption_key(dir))
fscrypt_set_encrypted_dentry(dentry);
fscrypt_set_d_op(dentry);
if (err && err != -ENOKEY)
return ERR_PTR(err);
}
err = fscrypt_prepare_lookup(dir, dentry, flags);
if (err)
return ERR_PTR(err);
err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &nm);
if (err)
@@ -743,9 +732,9 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
ubifs_assert(inode_is_locked(dir));
ubifs_assert(inode_is_locked(inode));
if (ubifs_crypt_is_encrypted(dir) &&
!fscrypt_has_permitted_context(dir, inode))
return -EPERM;
err = fscrypt_prepare_link(old_dentry, dir, dentry);
if (err)
return err;
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
if (err)
@@ -1353,12 +1342,6 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlink)
ubifs_assert(inode_is_locked(new_inode));
if (old_dir != new_dir) {
if (ubifs_crypt_is_encrypted(new_dir) &&
!fscrypt_has_permitted_context(new_dir, old_inode))
return -EPERM;
}
if (unlink && is_dir) {
err = ubifs_check_dir_empty(new_inode);
if (err)
@@ -1573,13 +1556,6 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
ubifs_assert(fst_inode && snd_inode);
if ((ubifs_crypt_is_encrypted(old_dir) ||
ubifs_crypt_is_encrypted(new_dir)) &&
(old_dir != new_dir) &&
(!fscrypt_has_permitted_context(new_dir, fst_inode) ||
!fscrypt_has_permitted_context(old_dir, snd_inode)))
return -EPERM;
err = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &fst_nm);
if (err)
return err;
@@ -1624,12 +1600,19 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
int err;
if (flags & ~(RENAME_NOREPLACE | RENAME_WHITEOUT | RENAME_EXCHANGE))
return -EINVAL;
ubifs_assert(inode_is_locked(old_dir));
ubifs_assert(inode_is_locked(new_dir));
err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry,
flags);
if (err)
return err;
if (flags & RENAME_EXCHANGE)
return ubifs_xrename(old_dir, old_dentry, new_dir, new_dentry);
fs/ubifs/file.c
@@ -1284,13 +1284,9 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
return err;
if (ubifs_crypt_is_encrypted(inode) && (attr->ia_valid & ATTR_SIZE)) {
err = fscrypt_get_encryption_info(inode);
if (err)
return err;
if (!fscrypt_has_encryption_key(inode))
return -ENOKEY;
}
err = fscrypt_prepare_setattr(dentry, attr);
if (err)
return err;
if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
/* Truncation to a smaller size */
@@ -1629,35 +1625,6 @@ static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
static int ubifs_file_open(struct inode *inode, struct file *filp)
{
int ret;
struct dentry *dir;
struct ubifs_info *c = inode->i_sb->s_fs_info;
if (ubifs_crypt_is_encrypted(inode)) {
ret = fscrypt_get_encryption_info(inode);
if (ret)
return -EACCES;
if (!fscrypt_has_encryption_key(inode))
return -ENOKEY;
}
dir = dget_parent(file_dentry(filp));
if (ubifs_crypt_is_encrypted(d_inode(dir)) &&
!fscrypt_has_permitted_context(d_inode(dir), inode)) {
ubifs_err(c, "Inconsistent encryption contexts: %lu/%lu",
(unsigned long) d_inode(dir)->i_ino,
(unsigned long) inode->i_ino);
dput(dir);
ubifs_ro_mode(c, -EPERM);
return -EPERM;
}
dput(dir);
return 0;
}
static const char *ubifs_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
@@ -1746,7 +1713,7 @@ const struct file_operations ubifs_file_operations = {
.unlocked_ioctl = ubifs_ioctl,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.open = ubifs_file_open,
.open = fscrypt_file_open,
#ifdef CONFIG_COMPAT
.compat_ioctl = ubifs_compat_ioctl,
#endif
fs/ubifs/tnc.c
@@ -1890,35 +1890,28 @@ static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key,
union ubifs_key *dkey;
for (;;) {
if (!err) {
err = tnc_next(c, &znode, n);
if (err)
goto out;
}
zbr = &znode->zbranch[*n];
dkey = &zbr->key;
if (key_inum(c, dkey) != key_inum(c, key) ||
key_type(c, dkey) != key_type(c, key)) {
err = -ENOENT;
goto out;
return -ENOENT;
}
err = tnc_read_hashed_node(c, zbr, dent);
if (err)
goto out;
return err;
if (key_hash(c, key) == key_hash(c, dkey) &&
le32_to_cpu(dent->cookie) == cookie) {
*zn = znode;
goto out;
return 0;
}
}
out:
return err;
err = tnc_next(c, &znode, n);
if (err)
return err;
}
}
static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
fs/ubifs/xattr.c
@@ -381,8 +381,6 @@ ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
if (buf) {
/* If @buf is %NULL we are supposed to return the length */
if (ui->data_len > size) {
ubifs_err(c, "buffer size %zd, xattr len %d",
size, ui->data_len);
err = -ERANGE;
goto out_iput;
}