Commit 08ffb584 authored by Linus Torvalds

Merge tag 'pstore-v4.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull pstore updates from Kees Cook:
 "pstore improvements:

   - refactor init to happen as early as possible again (Joel Fernandes)

   - improve resource reservation names"

* tag 'pstore-v4.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  pstore/ram: Clarify resource reservation labels
  pstore: Refactor compression initialization
  pstore: Allocate compression during late_initcall()
  pstore: Centralize init/exit routines
parents 638820d8 1227daa4
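
The headline change is initialization order: ramoops moves its registration up to postcore_initcall() so the backend comes up as early as possible, while a new pstore_init() at late_initcall() finishes compression setup once the crypto API is usable. As a rough illustration of that split (hypothetical function names, built-in code only, not part of this merge):

/*
 * Minimal sketch of initcall ordering for built-in code: postcore_initcall()
 * routines run several levels before late_initcall() ones, so a ramoops-style
 * backend can register early while core setup that needs crypto waits.
 * Function names here are illustrative only.
 */
#include <linux/init.h>
#include <linux/printk.h>

static int __init example_backend_init(void)	/* runs early, like ramoops_init() */
{
	pr_info("example: backend registered\n");
	return 0;
}
postcore_initcall(example_backend_init);

static int __init example_core_init(void)	/* runs late, like pstore_init() */
{
	pr_info("example: compression and filesystem setup finished\n");
	return 0;
}
late_initcall(example_core_init);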
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -482,12 +482,10 @@ static struct file_system_type pstore_fs_type = {
 	.kill_sb	= pstore_kill_sb,
 };
 
-static int __init init_pstore_fs(void)
+int __init pstore_init_fs(void)
 {
 	int err;
 
-	pstore_choose_compression();
-
 	/* Create a convenient mount point for people to access pstore */
 	err = sysfs_create_mount_point(fs_kobj, "pstore");
 	if (err)
@@ -500,14 +498,9 @@ static int __init init_pstore_fs(void)
 out:
 	return err;
 }
-module_init(init_pstore_fs)
 
-static void __exit exit_pstore_fs(void)
+void __exit pstore_exit_fs(void)
 {
 	unregister_filesystem(&pstore_fs_type);
 	sysfs_remove_mount_point(fs_kobj, "pstore");
 }
-module_exit(exit_pstore_fs)
-
-MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
-MODULE_LICENSE("GPL");
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -37,7 +37,8 @@ extern bool	pstore_is_mounted(void);
 extern void	pstore_record_init(struct pstore_record *record,
 				   struct pstore_info *psi);
 
-/* Called during module_init() */
-extern void __init pstore_choose_compression(void);
+/* Called during pstore init/exit. */
+int __init	pstore_init_fs(void);
+void __exit	pstore_exit_fs(void);
 
 #endif
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -274,36 +274,56 @@ static int pstore_decompress(void *in, void *out,
 
 static void allocate_buf_for_compression(void)
 {
+	struct crypto_comp *ctx;
+	int size;
+	char *buf;
+
+	/* Skip if not built-in or compression backend not selected yet. */
 	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend)
 		return;
 
+	/* Skip if no pstore backend yet or compression init already done. */
+	if (!psinfo || tfm)
+		return;
+
 	if (!crypto_has_comp(zbackend->name, 0, 0)) {
-		pr_err("No %s compression\n", zbackend->name);
+		pr_err("Unknown compression: %s\n", zbackend->name);
 		return;
 	}
 
-	big_oops_buf_sz = zbackend->zbufsize(psinfo->bufsize);
-	if (big_oops_buf_sz <= 0)
+	size = zbackend->zbufsize(psinfo->bufsize);
+	if (size <= 0) {
+		pr_err("Invalid compression size for %s: %d\n",
+		       zbackend->name, size);
 		return;
+	}
 
-	big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
-	if (!big_oops_buf) {
-		pr_err("allocate compression buffer error!\n");
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf) {
+		pr_err("Failed %d byte compression buffer allocation for: %s\n",
+		       size, zbackend->name);
 		return;
 	}
 
-	tfm = crypto_alloc_comp(zbackend->name, 0, 0);
-	if (IS_ERR_OR_NULL(tfm)) {
-		kfree(big_oops_buf);
-		big_oops_buf = NULL;
-		pr_err("crypto_alloc_comp() failed!\n");
+	ctx = crypto_alloc_comp(zbackend->name, 0, 0);
+	if (IS_ERR_OR_NULL(ctx)) {
+		kfree(buf);
+		pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
+		       PTR_ERR(ctx));
 		return;
 	}
+
+	/* A non-NULL big_oops_buf indicates compression is available. */
+	tfm = ctx;
+	big_oops_buf_sz = size;
+	big_oops_buf = buf;
+
+	pr_info("Using compression: %s\n", zbackend->name);
 }
 
 static void free_buf_for_compression(void)
 {
-	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && !IS_ERR_OR_NULL(tfm))
+	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm)
 		crypto_free_comp(tfm);
 	kfree(big_oops_buf);
 	big_oops_buf = NULL;
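
The refactored allocate_buf_for_compression() above stages everything in locals (ctx, size, buf) and assigns the globals (tfm, big_oops_buf_sz, big_oops_buf) only after every step has succeeded, so a failure part-way through leaves no half-initialized state and the function can simply be called again later. A small userspace sketch of that shape, with made-up names rather than kernel APIs:

#include <stdlib.h>

/* Illustrative only: "stage locally, publish on success", as in the
 * refactored allocate_buf_for_compression(). */
static char  *pub_buf;		/* published only when setup fully succeeds */
static size_t pub_size;

static void setup_once(size_t want)
{
	char *buf;

	if (pub_buf)		/* already set up; calling again is a no-op */
		return;

	buf = malloc(want);	/* stage in a local first */
	if (!buf)
		return;		/* failure leaves the globals untouched */

	pub_size = want;	/* publish only after everything worked */
	pub_buf = buf;
}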
@@ -774,14 +794,43 @@ void __init pstore_choose_compression(void)
 	for (step = zbackends; step->name; step++) {
 		if (!strcmp(compress, step->name)) {
 			zbackend = step;
-			pr_info("using %s compression\n", zbackend->name);
 			return;
 		}
 	}
 }
 
+static int __init pstore_init(void)
+{
+	int ret;
+
+	pstore_choose_compression();
+
+	/*
+	 * Check if any pstore backends registered earlier but did not
+	 * initialize compression because crypto was not ready. If so,
+	 * initialize compression now.
+	 */
+	allocate_buf_for_compression();
+
+	ret = pstore_init_fs();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+late_initcall(pstore_init);
+
+static void __exit pstore_exit(void)
+{
+	pstore_exit_fs();
+}
+module_exit(pstore_exit)
+
 module_param(compress, charp, 0444);
 MODULE_PARM_DESC(compress, "Pstore compression to use");
 
 module_param(backend, charp, 0444);
 MODULE_PARM_DESC(backend, "Pstore backend to use");
+
+MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
+MODULE_LICENSE("GPL");
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -587,9 +587,16 @@ static int ramoops_init_przs(const char *name,
 			goto fail;
 
 	for (i = 0; i < *cnt; i++) {
+		char *label;
+
+		if (*cnt == 1)
+			label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
+		else
+			label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
+					  name, i, *cnt - 1);
 		prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
-					       &cxt->ecc_info,
-					       cxt->memtype, flags);
+						  &cxt->ecc_info,
+						  cxt->memtype, flags, label);
 		if (IS_ERR(prz_ar[i])) {
 			err = PTR_ERR(prz_ar[i]);
 			dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
@@ -619,6 +626,8 @@ static int ramoops_init_prz(const char *name,
 			    struct persistent_ram_zone **prz,
 			    phys_addr_t *paddr, size_t sz, u32 sig)
 {
+	char *label;
+
 	if (!sz)
 		return 0;
 
@@ -629,8 +638,9 @@ static int ramoops_init_prz(const char *name,
 		return -ENOMEM;
 	}
 
+	label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
 	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
-				  cxt->memtype, 0);
+				  cxt->memtype, 0, label);
 	if (IS_ERR(*prz)) {
 		int err = PTR_ERR(*prz);
 
@@ -962,7 +972,7 @@ static int __init ramoops_init(void)
 
 	return ret;
 }
-late_initcall(ramoops_init);
+postcore_initcall(ramoops_init);
 
 static void __exit ramoops_exit(void)
 {
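
The kasprintf() calls above give every reserved region its own name: "ramoops:<name>" when an area has a single zone and "ramoops:<name>(i/max)" when it is split into several, which is what makes the entries distinguishable in /proc/iomem. A tiny userspace sketch that only mimics those format strings (the area name and zone count are made-up examples):

#include <stdio.h>

int main(void)
{
	const char *name = "dmesg";	/* hypothetical area name */
	int cnt = 2;			/* hypothetical zone count */
	int i;

	if (cnt == 1)
		printf("ramoops:%s\n", name);
	else
		for (i = 0; i < cnt; i++)
			printf("ramoops:%s(%d/%d)\n", name, i, cnt - 1);
	return 0;
}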
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -438,11 +438,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
 }
 
 static void *persistent_ram_iomap(phys_addr_t start, size_t size,
-		unsigned int memtype)
+		unsigned int memtype, char *label)
 {
 	void *va;
 
-	if (!request_mem_region(start, size, "persistent_ram")) {
+	if (!request_mem_region(start, size, label ?: "ramoops")) {
 		pr_err("request mem region (0x%llx@0x%llx) failed\n",
 			(unsigned long long)size, (unsigned long long)start);
 		return NULL;
@@ -470,7 +470,8 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
 	if (pfn_valid(start >> PAGE_SHIFT))
 		prz->vaddr = persistent_ram_vmap(start, size, memtype);
 	else
-		prz->vaddr = persistent_ram_iomap(start, size, memtype);
+		prz->vaddr = persistent_ram_iomap(start, size, memtype,
+						  prz->label);
 
 	if (!prz->vaddr) {
 		pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
@@ -541,12 +542,13 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
 	prz->ecc_info.par = NULL;
 
 	persistent_ram_free_old(prz);
+	kfree(prz->label);
 	kfree(prz);
 }
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 			u32 sig, struct persistent_ram_ecc_info *ecc_info,
-			unsigned int memtype, u32 flags)
+			unsigned int memtype, u32 flags, char *label)
 {
 	struct persistent_ram_zone *prz;
 	int ret = -ENOMEM;
@@ -560,6 +562,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 	/* Initialize general buffer state. */
 	raw_spin_lock_init(&prz->buffer_lock);
 	prz->flags = flags;
+	prz->label = label;
 
 	ret = persistent_ram_buffer_map(start, size, prz, memtype);
 	if (ret)
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -46,6 +46,7 @@ struct persistent_ram_zone {
 	phys_addr_t paddr;
 	size_t size;
 	void *vaddr;
+	char *label;
 	struct persistent_ram_buffer *buffer;
 	size_t buffer_size;
 	u32 flags;
@@ -65,7 +66,7 @@ struct persistent_ram_zone {
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 			u32 sig, struct persistent_ram_ecc_info *ecc_info,
-			unsigned int memtype, u32 flags);
+			unsigned int memtype, u32 flags, char *label);
 void persistent_ram_free(struct persistent_ram_zone *prz);
 void persistent_ram_zap(struct persistent_ram_zone *prz);
 
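
A side effect of the ram_core.c changes worth noting: a successfully created zone keeps the label pointer and persistent_ram_free() kfree()s it, while persistent_ram_iomap() falls back to the plain "ramoops" name when the label is NULL. A caller-side sketch of that contract (hypothetical wrapper, not from this merge):

#include <linux/kernel.h>
#include <linux/pstore_ram.h>
#include <linux/slab.h>

/* Hypothetical wrapper: the label is kasprintf()-allocated because the zone
 * stores the pointer and persistent_ram_free() frees it; passing NULL simply
 * falls back to the "ramoops" resource name. */
static struct persistent_ram_zone *example_new_zone(phys_addr_t paddr,
						    size_t sz, u32 sig,
						    struct persistent_ram_ecc_info *ecc,
						    unsigned int memtype)
{
	char *label = kasprintf(GFP_KERNEL, "ramoops:%s", "example");

	return persistent_ram_new(paddr, sz, sig, ecc, memtype, 0, label);
}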