Commit 25cd6f35 authored by Linus Torvalds

Merge tag 'fscrypt-for-linus' of git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt

Pull fscrypt updates from Eric Biggers:

 - Preparations for supporting encryption on ext4 filesystems where the
   filesystem block size is smaller than PAGE_SIZE.

 - Don't allow setting encryption policies on dead directories.

 - Various cleanups.

* tag 'fscrypt-for-linus' of git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt:
  fscrypt: document testing with xfstests
  fscrypt: remove selection of CONFIG_CRYPTO_SHA256
  fscrypt: remove unnecessary includes of ratelimit.h
  fscrypt: don't set policy for a dead directory
  ext4: encrypt only up to last block in ext4_bio_write_page()
  ext4: decrypt only the needed block in __ext4_block_zero_page_range()
  ext4: decrypt only the needed blocks in ext4_block_write_begin()
  ext4: clear BH_Uptodate flag on decryption error
  fscrypt: decrypt only the needed blocks in __fscrypt_decrypt_bio()
  fscrypt: support decrypting multiple filesystem blocks per page
  fscrypt: introduce fscrypt_decrypt_block_inplace()
  fscrypt: handle blocksize < PAGE_SIZE in fscrypt_zeroout_range()
  fscrypt: support encrypting multiple filesystem blocks per page
  fscrypt: introduce fscrypt_encrypt_block_inplace()
  fscrypt: clean up some BUG_ON()s in block encryption/decryption
  fscrypt: rename fscrypt_do_page_crypto() to fscrypt_crypt_block()
  fscrypt: remove the "write" part of struct fscrypt_ctx
  fscrypt: simplify bounce page handling
parents 40f06c79 05643363
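
The heart of the series is an API change, visible in the include/linux/fscrypt.h
diff below: the page-granular fscrypt_encrypt_page()/fscrypt_decrypt_page()
helpers are replaced by block-granular ones, and bounce-page bookkeeping no
longer goes through struct fscrypt_ctx. A minimal sketch of how a filesystem
write path uses the new helpers; example_encrypt_and_submit() and the I/O
submission step are hypothetical, only the fscrypt_* calls come from this merge:

    #include <linux/fscrypt.h>
    #include <linux/fs.h>

    /*
     * Hypothetical write path: encrypt a locked pagecache page into a
     * bounce page, submit the bounce page, then clean up on completion.
     */
    static int example_encrypt_and_submit(struct inode *inode,
                                          struct page *page,
                                          unsigned int len)
    {
            /*
             * len must cover whole filesystem blocks, e.g.
             * round_up(len, i_blocksize(inode)) as the ext4 hunk below does.
             * With blocksize == PAGE_SIZE this is simply PAGE_SIZE.
             */
            struct page *bounce_page =
                    fscrypt_encrypt_pagecache_blocks(page, len, 0, GFP_NOFS);

            if (IS_ERR(bounce_page))
                    return PTR_ERR(bounce_page);

            /* ... add bounce_page (not page) to a bio and submit it ... */

            /*
             * In the write-completion path, recover the pagecache page and
             * free the bounce page; this is exactly what the new inline
             * helper fscrypt_finalize_bounce_page() does.
             */
            if (fscrypt_is_bounce_page(bounce_page)) {
                    page = fscrypt_pagecache_page(bounce_page);
                    fscrypt_free_bounce_page(bounce_page);
            }
            return 0;
    }
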
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -191,7 +191,9 @@ Currently, the following pairs of encryption modes are supported:
 If unsure, you should use the (AES-256-XTS, AES-256-CTS-CBC) pair.
 
 AES-128-CBC was added only for low-powered embedded devices with
-crypto accelerators such as CAAM or CESA that do not support XTS.
+crypto accelerators such as CAAM or CESA that do not support XTS.  To
+use AES-128-CBC, CONFIG_CRYPTO_SHA256 (or another SHA-256
+implementation) must be enabled so that ESSIV can be used.
 
 Adiantum is a (primarily) stream cipher-based mode that is fast even
 on CPUs without dedicated crypto instructions.  It's also a true
@@ -647,3 +649,42 @@ Note that the precise way that filenames are presented to userspace
 without the key is subject to change in the future.  It is only meant
 as a way to temporarily present valid filenames so that commands like
 ``rm -r`` work as expected on encrypted directories.
+
+Tests
+=====
+
+To test fscrypt, use xfstests, which is Linux's de facto standard
+filesystem test suite.  First, run all the tests in the "encrypt"
+group on the relevant filesystem(s).  For example, to test ext4 and
+f2fs encryption using `kvm-xfstests
+<https://github.com/tytso/xfstests-bld/blob/master/Documentation/kvm-quickstart.md>`_::
+
+    kvm-xfstests -c ext4,f2fs -g encrypt
+
+UBIFS encryption can also be tested this way, but it should be done in
+a separate command, and it takes some time for kvm-xfstests to set up
+emulated UBI volumes::
+
+    kvm-xfstests -c ubifs -g encrypt
+
+No tests should fail.  However, tests that use non-default encryption
+modes (e.g. generic/549 and generic/550) will be skipped if the needed
+algorithms were not built into the kernel's crypto API.  Also, tests
+that access the raw block device (e.g. generic/399, generic/548,
+generic/549, generic/550) will be skipped on UBIFS.
+
+Besides running the "encrypt" group tests, for ext4 and f2fs it's also
+possible to run most xfstests with the "test_dummy_encryption" mount
+option.  This option causes all new files to be automatically
+encrypted with a dummy key, without having to make any API calls.
+This tests the encrypted I/O paths more thoroughly.  To do this with
+kvm-xfstests, use the "encrypt" filesystem configuration::
+
+    kvm-xfstests -c ext4/encrypt,f2fs/encrypt -g auto
+
+Because this runs many more tests than "-g encrypt" does, it takes
+much longer to run; so also consider using `gce-xfstests
+<https://github.com/tytso/xfstests-bld/blob/master/Documentation/gce-xfstests.md>`_
+instead of kvm-xfstests::
+
+    gce-xfstests -c ext4/encrypt,f2fs/encrypt -g auto
--- a/fs/crypto/Kconfig
+++ b/fs/crypto/Kconfig
@@ -7,7 +7,6 @@ config FS_ENCRYPTION
 	select CRYPTO_ECB
 	select CRYPTO_XTS
 	select CRYPTO_CTS
-	select CRYPTO_SHA256
 	select KEYS
 	help
 	  Enable encryption of files and directories.  This
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -33,9 +33,8 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
 	bio_for_each_segment_all(bv, bio, iter_all) {
 		struct page *page = bv->bv_page;
-		int ret = fscrypt_decrypt_page(page->mapping->host, page,
-				PAGE_SIZE, 0, page->index);
+		int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
+							   bv->bv_offset);
 
 		if (ret)
 			SetPageError(page);
 		else if (done)
@@ -53,9 +52,8 @@ EXPORT_SYMBOL(fscrypt_decrypt_bio);
 static void completion_pages(struct work_struct *work)
 {
-	struct fscrypt_ctx *ctx =
-		container_of(work, struct fscrypt_ctx, r.work);
-	struct bio *bio = ctx->r.bio;
+	struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work);
+	struct bio *bio = ctx->bio;
 
 	__fscrypt_decrypt_bio(bio, true);
 	fscrypt_release_ctx(ctx);
@@ -64,57 +62,29 @@ static void completion_pages(struct work_struct *work)
 void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
 {
-	INIT_WORK(&ctx->r.work, completion_pages);
-	ctx->r.bio = bio;
-	fscrypt_enqueue_decrypt_work(&ctx->r.work);
+	INIT_WORK(&ctx->work, completion_pages);
+	ctx->bio = bio;
+	fscrypt_enqueue_decrypt_work(&ctx->work);
 }
 EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
 
-void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
-	struct fscrypt_ctx *ctx;
-	struct page *bounce_page;
-
-	/* The bounce data pages are unmapped. */
-	if ((*page)->mapping)
-		return;
-
-	/* The bounce data page is unmapped. */
-	bounce_page = *page;
-	ctx = (struct fscrypt_ctx *)page_private(bounce_page);
-
-	/* restore control page */
-	*page = ctx->w.control_page;
-
-	if (restore)
-		fscrypt_restore_control_page(bounce_page);
-}
-EXPORT_SYMBOL(fscrypt_pullback_bio_page);
-
 int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 			  sector_t pblk, unsigned int len)
 {
-	struct fscrypt_ctx *ctx;
-	struct page *ciphertext_page = NULL;
+	const unsigned int blockbits = inode->i_blkbits;
+	const unsigned int blocksize = 1 << blockbits;
+	struct page *ciphertext_page;
 	struct bio *bio;
 	int ret, err = 0;
 
-	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
-
-	ctx = fscrypt_get_ctx(GFP_NOFS);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
-
-	ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
-	if (IS_ERR(ciphertext_page)) {
-		err = PTR_ERR(ciphertext_page);
-		goto errout;
-	}
+	ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
+	if (!ciphertext_page)
+		return -ENOMEM;
 
 	while (len--) {
-		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
-					     ZERO_PAGE(0), ciphertext_page,
-					     PAGE_SIZE, 0, GFP_NOFS);
+		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
+					  ZERO_PAGE(0), ciphertext_page,
+					  blocksize, 0, GFP_NOFS);
 		if (err)
 			goto errout;
@@ -124,14 +94,11 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 			goto errout;
 		}
 		bio_set_dev(bio, inode->i_sb->s_bdev);
-		bio->bi_iter.bi_sector =
-			pblk << (inode->i_sb->s_blocksize_bits - 9);
+		bio->bi_iter.bi_sector = pblk << (blockbits - 9);
 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-		ret = bio_add_page(bio, ciphertext_page,
-				   inode->i_sb->s_blocksize, 0);
-		if (ret != inode->i_sb->s_blocksize) {
+		ret = bio_add_page(bio, ciphertext_page, blocksize, 0);
+		if (WARN_ON(ret != blocksize)) {
 			/* should never happen! */
-			WARN_ON(1);
 			bio_put(bio);
 			err = -EIO;
 			goto errout;
@@ -147,7 +114,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 	}
 	err = 0;
 errout:
-	fscrypt_release_ctx(ctx);
+	fscrypt_free_bounce_page(ciphertext_page);
 	return err;
 }
 EXPORT_SYMBOL(fscrypt_zeroout_range);
(diff collapsed in this view: fs/crypto/crypto.c)
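The collapsed fs/crypto/crypto.c diff carries the bulk of the rework
("fscrypt: support encrypting/decrypting multiple filesystem blocks per page",
"fscrypt: simplify bounce page handling"). As a rough, hypothetical
illustration of the per-block loop such an API implies — not the collapsed
diff itself — the new pagecache helpers can be thought of as iterating
fscrypt_crypt_block() over each filesystem block in the requested range,
using the prototype shown in the fscrypt_private.h hunk below:

    /* Hypothetical sketch only; the real implementation is in the
     * collapsed fs/crypto/crypto.c diff. */
    static int example_crypt_blocks(const struct inode *inode,
                                    fscrypt_direction_t rw, struct page *src,
                                    struct page *dst, unsigned int len,
                                    unsigned int offs, gfp_t gfp_flags)
    {
            const unsigned int blocksize = i_blocksize(inode);
            /* logical block number of the first block in the range */
            u64 lblk = ((u64)src->index << (PAGE_SHIFT - inode->i_blkbits)) +
                       (offs >> inode->i_blkbits);
            unsigned int i;
            int err;

            for (i = offs; i < offs + len; i += blocksize, lblk++) {
                    err = fscrypt_crypt_block(inode, rw, lblk, src, dst,
                                              blocksize, i, gfp_flags);
                    if (err)
                            return err;
            }
            return 0;
    }
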
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -12,7 +12,6 @@
  */
 
 #include <linux/scatterlist.h>
-#include <linux/ratelimit.h>
 #include <crypto/skcipher.h>
 #include "fscrypt_private.h"
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -94,7 +94,6 @@ typedef enum {
 } fscrypt_direction_t;
 
 #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL		0x00000001
-#define FS_CTX_HAS_BOUNCE_BUFFER_FL		0x00000002
 
 static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
 					   u32 filenames_mode)
@@ -117,14 +116,12 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
 /* crypto.c */
 extern struct kmem_cache *fscrypt_info_cachep;
 extern int fscrypt_initialize(unsigned int cop_flags);
-extern int fscrypt_do_page_crypto(const struct inode *inode,
-				  fscrypt_direction_t rw, u64 lblk_num,
-				  struct page *src_page,
-				  struct page *dest_page,
-				  unsigned int len, unsigned int offs,
-				  gfp_t gfp_flags);
-extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
-					      gfp_t gfp_flags);
+extern int fscrypt_crypt_block(const struct inode *inode,
+			       fscrypt_direction_t rw, u64 lblk_num,
+			       struct page *src_page, struct page *dest_page,
+			       unsigned int len, unsigned int offs,
+			       gfp_t gfp_flags);
+extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
 extern const struct dentry_operations fscrypt_d_ops;
 
 extern void __printf(3, 4) __cold
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -5,7 +5,6 @@
  * Encryption hooks for higher-level filesystem operations.
  */
 
-#include <linux/ratelimit.h>
 #include "fscrypt_private.h"
 
 /**
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -12,7 +12,6 @@
 #include <keys/user-type.h>
 #include <linux/hashtable.h>
 #include <linux/scatterlist.h>
-#include <linux/ratelimit.h>
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
 #include <crypto/sha.h>
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -81,6 +81,8 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
 	if (ret == -ENODATA) {
 		if (!S_ISDIR(inode->i_mode))
 			ret = -ENOTDIR;
+		else if (IS_DEADDIR(inode))
+			ret = -ENOENT;
 		else if (!inode->i_sb->s_cop->empty_dir(inode))
 			ret = -ENOTEMPTY;
 		else
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1164,8 +1164,9 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 	int err = 0;
 	unsigned blocksize = inode->i_sb->s_blocksize;
 	unsigned bbits;
-	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
-	bool decrypt = false;
+	struct buffer_head *bh, *head, *wait[2];
+	int nr_wait = 0;
+	int i;
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(from > PAGE_SIZE);
@@ -1217,23 +1218,32 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 		    !buffer_unwritten(bh) &&
 		    (block_start < from || block_end > to)) {
 			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
-			*wait_bh++ = bh;
-			decrypt = IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
+			wait[nr_wait++] = bh;
 		}
 	}
 	/*
 	 * If we issued read requests, let them complete.
 	 */
-	while (wait_bh > wait) {
-		wait_on_buffer(*--wait_bh);
-		if (!buffer_uptodate(*wait_bh))
+	for (i = 0; i < nr_wait; i++) {
+		wait_on_buffer(wait[i]);
+		if (!buffer_uptodate(wait[i]))
 			err = -EIO;
 	}
-	if (unlikely(err))
+	if (unlikely(err)) {
 		page_zero_new_buffers(page, from, to);
-	else if (decrypt)
-		err = fscrypt_decrypt_page(page->mapping->host, page,
-				PAGE_SIZE, 0, page->index);
+	} else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
+		for (i = 0; i < nr_wait; i++) {
+			int err2;
+
+			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
+								bh_offset(wait[i]));
+			if (err2) {
+				clear_buffer_uptodate(wait[i]);
+				err = err2;
+			}
+		}
+	}
+
 	return err;
 }
 #endif
@@ -4065,9 +4075,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 		if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
 			/* We expect the key to be set. */
 			BUG_ON(!fscrypt_has_encryption_key(inode));
-			BUG_ON(blocksize != PAGE_SIZE);
-			WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host,
-						page, PAGE_SIZE, 0, page->index));
+			WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks(
+					page, blocksize, bh_offset(bh)));
 		}
 	}
 	if (ext4_should_journal_data(inode)) {
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -66,9 +66,7 @@ static void ext4_finish_bio(struct bio *bio)
 	bio_for_each_segment_all(bvec, bio, iter_all) {
 		struct page *page = bvec->bv_page;
-#ifdef CONFIG_FS_ENCRYPTION
-		struct page *data_page = NULL;
-#endif
+		struct page *bounce_page = NULL;
 		struct buffer_head *bh, *head;
 		unsigned bio_start = bvec->bv_offset;
 		unsigned bio_end = bio_start + bvec->bv_len;
@@ -78,13 +76,10 @@ static void ext4_finish_bio(struct bio *bio)
 		if (!page)
 			continue;
 
-#ifdef CONFIG_FS_ENCRYPTION
-		if (!page->mapping) {
-			/* The bounce data pages are unmapped. */
-			data_page = page;
-			fscrypt_pullback_bio_page(&page, false);
+		if (fscrypt_is_bounce_page(page)) {
+			bounce_page = page;
+			page = fscrypt_pagecache_page(bounce_page);
 		}
-#endif
 
 		if (bio->bi_status) {
 			SetPageError(page);
@@ -111,10 +106,7 @@ static void ext4_finish_bio(struct bio *bio)
 		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
 		local_irq_restore(flags);
 		if (!under_io) {
-#ifdef CONFIG_FS_ENCRYPTION
-			if (data_page)
-				fscrypt_restore_control_page(data_page);
-#endif
+			fscrypt_free_bounce_page(bounce_page);
 			end_page_writeback(page);
 		}
 	}
@@ -415,7 +407,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 			struct writeback_control *wbc,
 			bool keep_towrite)
 {
-	struct page *data_page = NULL;
+	struct page *bounce_page = NULL;
 	struct inode *inode = page->mapping->host;
 	unsigned block_start;
 	struct buffer_head *bh, *head;
@@ -475,14 +467,22 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	bh = head = page_buffers(page);
 
+	/*
+	 * If any blocks are being written to an encrypted file, encrypt them
+	 * into a bounce page.  For simplicity, just encrypt until the last
+	 * block which might be needed.  This may cause some unneeded blocks
+	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
+	 * can't happen in the common case of blocksize == PAGE_SIZE.
+	 */
 	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) {
 		gfp_t gfp_flags = GFP_NOFS;
+		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
 
 	retry_encrypt:
-		data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
-						page->index, gfp_flags);
-		if (IS_ERR(data_page)) {
-			ret = PTR_ERR(data_page);
+		bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
+							       0, gfp_flags);
+		if (IS_ERR(bounce_page)) {
+			ret = PTR_ERR(bounce_page);
 			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
 				if (io->io_bio) {
 					ext4_io_submit(io);
@@ -491,7 +491,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 				gfp_flags |= __GFP_NOFAIL;
 				goto retry_encrypt;
 			}
-			data_page = NULL;
+			bounce_page = NULL;
 			goto out;
 		}
 	}
@@ -500,8 +500,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	do {
 		if (!buffer_async_write(bh))
 			continue;
-		ret = io_submit_add_bh(io, inode,
-				       data_page ? data_page : page, bh);
+		ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh);
 		if (ret) {
 			/*
 			 * We only get here on ENOMEM.  Not much else
@@ -517,8 +516,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	/* Error stopped previous loop? Clean up buffers... */
 	if (ret) {
 	out:
-		if (data_page)
-			fscrypt_restore_control_page(data_page);
+		fscrypt_free_bounce_page(bounce_page);
 		printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
 		redirty_page_for_writepage(wbc, page);
 		do {
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -185,7 +185,7 @@ static void f2fs_write_end_io(struct bio *bio)
 			continue;
 		}
 
-		fscrypt_pullback_bio_page(&page, true);
+		fscrypt_finalize_bounce_page(&page);
 
 		if (unlikely(bio->bi_status)) {
 			mapping_set_error(page->mapping, -EIO);
@@ -362,10 +362,9 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
 	bio_for_each_segment_all(bvec, io->bio, iter_all) {
 
-		if (bvec->bv_page->mapping)
-			target = bvec->bv_page;
-		else
-			target = fscrypt_control_page(bvec->bv_page);
+		target = bvec->bv_page;
+		if (fscrypt_is_bounce_page(target))
+			target = fscrypt_pagecache_page(target);
 
 		if (inode && inode == target->mapping->host)
 			return true;
@@ -1727,8 +1726,9 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
 	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
 
 retry_encrypt:
-	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
-			PAGE_SIZE, 0, fio->page->index, gfp_flags);
+	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page,
+							       PAGE_SIZE, 0,
+							       gfp_flags);
 	if (IS_ERR(fio->encrypted_page)) {
 		/* flush pending IOs and wait for a while in the ENOMEM case */
 		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
@@ -1900,8 +1900,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 		err = f2fs_inplace_write_data(fio);
 		if (err) {
 			if (f2fs_encrypted_file(inode))
-				fscrypt_pullback_bio_page(&fio->encrypted_page,
-									true);
+				fscrypt_finalize_bounce_page(&fio->encrypted_page);
 			if (PageWriteback(page))
 				end_page_writeback(page);
 		} else {
--- a/fs/ubifs/crypto.c
+++ b/fs/ubifs/crypto.c
@@ -29,8 +29,8 @@ int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
 {
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	void *p = &dn->data;
-	struct page *ret;
 	unsigned int pad_len = round_up(in_len, UBIFS_CIPHER_BLOCK_SIZE);
+	int err;
 
 	ubifs_assert(c, pad_len <= *out_len);
 	dn->compr_size = cpu_to_le16(in_len);
@@ -39,11 +39,11 @@ int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
 	if (pad_len != in_len)
 		memset(p + in_len, 0, pad_len - in_len);
 
-	ret = fscrypt_encrypt_page(inode, virt_to_page(&dn->data), pad_len,
-			offset_in_page(&dn->data), block, GFP_NOFS);
-	if (IS_ERR(ret)) {
-		ubifs_err(c, "fscrypt_encrypt_page failed: %ld", PTR_ERR(ret));
-		return PTR_ERR(ret);
+	err = fscrypt_encrypt_block_inplace(inode, virt_to_page(p), pad_len,
+					    offset_in_page(p), block, GFP_NOFS);
+	if (err) {
+		ubifs_err(c, "fscrypt_encrypt_block_inplace() failed: %d", err);
+		return err;
 	}
 
 	*out_len = pad_len;
@@ -64,10 +64,11 @@ int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
 	}
 
 	ubifs_assert(c, dlen <= UBIFS_BLOCK_SIZE);
-	err = fscrypt_decrypt_page(inode, virt_to_page(&dn->data), dlen,
-			offset_in_page(&dn->data), block);
+	err = fscrypt_decrypt_block_inplace(inode, virt_to_page(&dn->data),
+					    dlen, offset_in_page(&dn->data),
+					    block);
 	if (err) {
-		ubifs_err(c, "fscrypt_decrypt_page failed: %i", err);
+		ubifs_err(c, "fscrypt_decrypt_block_inplace() failed: %d", err);
 		return err;
 	}
 
 	*out_len = clen;
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -63,16 +63,13 @@ struct fscrypt_operations {
 	unsigned int max_namelen;
 };
 
-/* Decryption work */
 struct fscrypt_ctx {
 	union {
-		struct {
-			struct page *bounce_page;	/* Ciphertext page */
-			struct page *control_page;	/* Original page */
-		} w;
 		struct {
 			struct bio *bio;
 			struct work_struct work;
-		} r;
+		};
 		struct list_head free_list;	/* Free list */
 	};
 	u8 flags;				/* Flags */
@@ -106,18 +103,33 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
 extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
 extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t);
 extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
-						unsigned int, unsigned int,
-						u64, gfp_t);
-extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
-				unsigned int, u64);
 
-static inline struct page *fscrypt_control_page(struct page *page)
+extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
+						     unsigned int len,
+						     unsigned int offs,
+						     gfp_t gfp_flags);
+extern int fscrypt_encrypt_block_inplace(const struct inode *inode,
+					 struct page *page, unsigned int len,
+					 unsigned int offs, u64 lblk_num,
+					 gfp_t gfp_flags);
+
+extern int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
+					    unsigned int offs);
+extern int fscrypt_decrypt_block_inplace(const struct inode *inode,
+					 struct page *page, unsigned int len,
+					 unsigned int offs, u64 lblk_num);
+
+static inline bool fscrypt_is_bounce_page(struct page *page)
+{
+	return page->mapping == NULL;
+}
+
+static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
 {
-	return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+	return (struct page *)page_private(bounce_page);
 }
 
-extern void fscrypt_restore_control_page(struct page *);
+extern void fscrypt_free_bounce_page(struct page *bounce_page);
 
 /* policy.c */
 extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
@@ -223,7 +235,6 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
 extern void fscrypt_decrypt_bio(struct bio *);
 extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
 					struct bio *bio);
-extern void fscrypt_pullback_bio_page(struct page **, bool);
 extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
 				 unsigned int);
@@ -283,32 +294,51 @@ static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
 	return;
 }
 
-static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
+static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
+							    unsigned int len,
+							    unsigned int offs,
+							    gfp_t gfp_flags)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_encrypt_block_inplace(const struct inode *inode,
 						struct page *page,
 						unsigned int len,
-						unsigned int offs,
-						u64 lblk_num, gfp_t gfp_flags)
+						unsigned int offs, u64 lblk_num,
+						gfp_t gfp_flags)
 {
-	return ERR_PTR(-EOPNOTSUPP);
+	return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_decrypt_pagecache_blocks(struct page *page,
+						   unsigned int len,
+						   unsigned int offs)
+{
+	return -EOPNOTSUPP;
 }
 
-static inline int fscrypt_decrypt_page(const struct inode *inode,
-				       struct page *page,
-				       unsigned int len, unsigned int offs,
-				       u64 lblk_num)
+static inline int fscrypt_decrypt_block_inplace(const struct inode *inode,
						struct page *page,
+						unsigned int len,
+						unsigned int offs, u64 lblk_num)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline struct page *fscrypt_control_page(struct page *page)
+static inline bool fscrypt_is_bounce_page(struct page *page)
+{
+	return false;
+}
+
+static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
 {
 	WARN_ON_ONCE(1);
 	return ERR_PTR(-EINVAL);
 }
 
-static inline void fscrypt_restore_control_page(struct page *page)
+static inline void fscrypt_free_bounce_page(struct page *bounce_page)
 {
-	return;
 }
@@ -410,11 +440,6 @@ static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
 {
 }
 
-static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
-	return;
-}
-
 static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 					sector_t pblk, unsigned int len)
 {
@@ -692,4 +717,15 @@ static inline int fscrypt_encrypt_symlink(struct inode *inode,
 	return 0;
 }
 
+/* If *pagep is a bounce page, free it and set *pagep to the pagecache page */
+static inline void fscrypt_finalize_bounce_page(struct page **pagep)
+{
+	struct page *page = *pagep;
+
+	if (fscrypt_is_bounce_page(page)) {
+		*pagep = fscrypt_pagecache_page(page);
+		fscrypt_free_bounce_page(page);
+	}
+}
+
 #endif	/* _LINUX_FSCRYPT_H */