Commit f6feea56 authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2023-02-13-13-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Twelve hotfixes, mostly against mm/.

  Five of these fixes are cc:stable"

* tag 'mm-hotfixes-stable-2023-02-13-13-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  of: reserved_mem: Have kmemleak ignore dynamically allocated reserved mem
  scripts/gdb: fix 'lx-current' for x86
  lib: parser: optimize match_NUMBER apis to use local array
  mm: shrinkers: fix deadlock in shrinker debugfs
  mm: hwpoison: support recovery from ksm_might_need_to_copy()
  kasan: fix Oops due to missing calls to kasan_arch_is_ready()
  revert "squashfs: harden sanity check in squashfs_read_xattr_id_table"
  fsdax: dax_unshare_iter() should return a valid length
  mm/gup: add folio to list when folio_isolate_lru() succeed
  aio: fix mremap after fork null-deref
  mailmap: add entry for Alexander Mikhalitsyn
  mm: extend max struct page size for kmsan
parents b408817d ce4d9a1e
@@ -25,6 +25,8 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Alexander Lobakin <alobakin@pm.me> <alobakin@dlink.ru>
 Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
 Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
+Alexander Mikhalitsyn <alexander@mihalicyn.com> <alexander.mikhalitsyn@virtuozzo.com>
+Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@canonical.com>
 Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
 Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
......
@@ -48,9 +48,10 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
 		err = memblock_mark_nomap(base, size);
 		if (err)
 			memblock_phys_free(base, size);
-		kmemleak_ignore_phys(base);
 	}
 
+	kmemleak_ignore_phys(base);
+
 	return err;
 }
......
@@ -361,6 +361,9 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
 	spin_lock(&mm->ioctx_lock);
 	rcu_read_lock();
 	table = rcu_dereference(mm->ioctx_table);
+	if (!table)
+		goto out_unlock;
+
 	for (i = 0; i < table->nr; i++) {
 		struct kioctx *ctx;
@@ -374,6 +377,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
 		}
 	}
 
+out_unlock:
 	rcu_read_unlock();
 	spin_unlock(&mm->ioctx_lock);
 	return res;
......
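The hunk above guards against a forked child that inherits the aio ring mapping but has never called io_setup(), so its mm->ioctx_table is still NULL. A minimal userspace sketch of the same "guard the optional table, leave through the common unlock label" shape (pthread mutex standing in for the kernel locking; all names are hypothetical, not from the patch):

#include <pthread.h>
#include <stdio.h>

struct table { int nr; int ids[8]; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct table *ioctx_table;	/* stays NULL in a process that never registered a ctx */

static int lookup(int id)
{
	int found = 0, i;

	pthread_mutex_lock(&lock);
	if (!ioctx_table)		/* the added guard: nothing registered, nothing to scan */
		goto out_unlock;
	for (i = 0; i < ioctx_table->nr; i++)
		if (ioctx_table->ids[i] == id)
			found = 1;
out_unlock:
	pthread_mutex_unlock(&lock);
	return found;
}

int main(void)
{
	printf("lookup with no table: %d\n", lookup(42));	/* prints 0, no crash */
	return 0;
}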
@@ -1271,8 +1271,9 @@ static s64 dax_unshare_iter(struct iomap_iter *iter)
 	if (ret < 0)
 		goto out_unlock;
-	ret = copy_mc_to_kernel(daddr, saddr, length);
-	if (ret)
+	if (copy_mc_to_kernel(daddr, saddr, length) == 0)
+		ret = length;
+	else
 		ret = -EIO;
 
 out_unlock:
......
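copy_mc_to_kernel() returns the number of bytes it could not copy, so 0 means success, while the iomap iterator expects each step to report how many bytes it processed (or a negative errno). A small userspace sketch of that calling convention (hypothetical helper names; -EIO written as -5 purely for illustration):

#include <stdio.h>
#include <string.h>

/* Stand-in for copy_mc_to_kernel(): returns bytes NOT copied, 0 on success. */
static size_t copy_maybe_faulting(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/* Stand-in for one dax_unshare_iter() step: must return bytes processed or -errno. */
static long unshare_step(void *dst, const void *src, long length)
{
	if (copy_maybe_faulting(dst, src, length) == 0)
		return length;	/* advanced `length` bytes */
	return -5;		/* -EIO: poisoned source, nothing advanced */
}

int main(void)
{
	char src[16] = "payload", dst[16];
	long ret = unshare_step(dst, src, sizeof(src));

	printf("step advanced %ld bytes\n", ret);	/* 16, never 0 on success */
	return 0;
}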
@@ -76,7 +76,7 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
 	/* Sanity check values */
 
 	/* there is always at least one xattr id */
-	if (*xattr_ids <= 0)
+	if (*xattr_ids == 0)
 		return ERR_PTR(-EINVAL);
 
 	len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
......
@@ -137,7 +137,7 @@ extern int mmap_rnd_compat_bits __read_mostly;
  * define their own version of this macro in <asm/pgtable.h>
  */
 #if BITS_PER_LONG == 64
-/* This function must be updated when the size of struct page grows above 80
+/* This function must be updated when the size of struct page grows above 96
  * or reduces below 56. The idea that compiler optimizes out switch()
  * statement, and only leaves move/store instructions. Also the compiler can
  * combine write statements if they are both assignments and can be reordered,
@@ -148,12 +148,18 @@ static inline void __mm_zero_struct_page(struct page *page)
 {
 	unsigned long *_pp = (void *)page;
 
-	/* Check that struct page is either 56, 64, 72, or 80 bytes */
+	/* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
 	BUILD_BUG_ON(sizeof(struct page) & 7);
 	BUILD_BUG_ON(sizeof(struct page) < 56);
-	BUILD_BUG_ON(sizeof(struct page) > 80);
+	BUILD_BUG_ON(sizeof(struct page) > 96);
 
 	switch (sizeof(struct page)) {
+	case 96:
+		_pp[11] = 0;
+		fallthrough;
+	case 88:
+		_pp[10] = 0;
+		fallthrough;
 	case 80:
 		_pp[9] = 0;
 		fallthrough;
......
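The switch in __mm_zero_struct_page() relies on fallthrough so that an N-word struct is cleared with exactly N word stores and no memset() call; this hunk only teaches it about the larger sizes that KMSAN's extra metadata can push struct page to. A standalone userspace sketch of the same pattern (hypothetical obj type, not the kernel function itself):

#include <assert.h>
#include <string.h>

struct obj { unsigned long w[12]; };	/* 96 bytes on LP64, like the new maximum */

static inline void zero_obj(struct obj *o)
{
	unsigned long *_pp = (void *)o;

	_Static_assert(sizeof(struct obj) % 8 == 0, "whole words only");

	switch (sizeof(struct obj)) {
	case 96: _pp[11] = 0; /* fall through */
	case 88: _pp[10] = 0; /* fall through */
	case 80: _pp[9]  = 0; /* fall through */
	case 72: _pp[8]  = 0; /* fall through */
	case 64: _pp[7]  = 0; /* fall through */
	case 56: _pp[6]  = 0;
		 _pp[5] = 0; _pp[4] = 0; _pp[3] = 0;
		 _pp[2] = 0; _pp[1] = 0; _pp[0] = 0;
		 break;
	default: memset(o, 0, sizeof(*o));	/* sizes the switch does not know about */
	}
}

int main(void)
{
	struct obj o;

	memset(&o, 0xff, sizeof(o));
	zero_obj(&o);
	for (unsigned int i = 0; i < 12; i++)
		assert(o.w[i] == 0);
	return 0;
}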
@@ -107,7 +107,7 @@ extern void synchronize_shrinkers(void);
 #ifdef CONFIG_SHRINKER_DEBUG
 extern int shrinker_debugfs_add(struct shrinker *shrinker);
-extern void shrinker_debugfs_remove(struct shrinker *shrinker);
+extern struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker);
 extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
 						  const char *fmt, ...);
 #else /* CONFIG_SHRINKER_DEBUG */
@@ -115,8 +115,9 @@ static inline int shrinker_debugfs_add(struct shrinker *shrinker)
 {
 	return 0;
 }
-static inline void shrinker_debugfs_remove(struct shrinker *shrinker)
+static inline struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
 {
+	return NULL;
 }
 static inline __printf(2, 3)
 int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
......
@@ -11,6 +11,15 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 
+/*
+ * max size needed by different bases to express U64
+ * HEX: "0xFFFFFFFFFFFFFFFF" --> 18
+ * DEC: "18446744073709551615" --> 20
+ * OCT: "01777777777777777777777" --> 23
+ * pick the max one to define NUMBER_BUF_LEN
+ */
+#define NUMBER_BUF_LEN 24
+
 /**
  * match_one - Determines if a string matches a simple pattern
  * @s: the string to examine for presence of the pattern
@@ -129,14 +138,12 @@ EXPORT_SYMBOL(match_token);
 static int match_number(substring_t *s, int *result, int base)
 {
 	char *endp;
-	char *buf;
+	char buf[NUMBER_BUF_LEN];
 	int ret;
 	long val;
 
-	buf = match_strdup(s);
-	if (!buf)
-		return -ENOMEM;
+	if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+		return -ERANGE;
 
 	ret = 0;
 	val = simple_strtol(buf, &endp, base);
 	if (endp == buf)
@@ -145,7 +152,6 @@ static int match_number(substring_t *s, int *result, int base)
 		ret = -ERANGE;
 	else
 		*result = (int) val;
-	kfree(buf);
 	return ret;
 }
@@ -163,18 +169,15 @@ static int match_number(substring_t *s, int *result, int base)
  */
 static int match_u64int(substring_t *s, u64 *result, int base)
 {
-	char *buf;
+	char buf[NUMBER_BUF_LEN];
 	int ret;
 	u64 val;
 
-	buf = match_strdup(s);
-	if (!buf)
-		return -ENOMEM;
+	if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+		return -ERANGE;
 
 	ret = kstrtoull(buf, base, &val);
 	if (!ret)
 		*result = val;
-	kfree(buf);
 	return ret;
 }
@@ -206,14 +209,12 @@ EXPORT_SYMBOL(match_int);
  */
 int match_uint(substring_t *s, unsigned int *result)
 {
-	int err = -ENOMEM;
-	char *buf = match_strdup(s);
+	char buf[NUMBER_BUF_LEN];
 
-	if (buf) {
-		err = kstrtouint(buf, 10, result);
-		kfree(buf);
-	}
-	return err;
+	if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+		return -ERANGE;
+
+	return kstrtouint(buf, 10, result);
 }
 EXPORT_SYMBOL(match_uint);
......
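The new NUMBER_BUF_LEN covers the widest textual form of a u64 plus the terminating NUL: 23 characters for octal ("0" prefix plus 22 digits), so 24 bytes is enough for every base the parser accepts. A quick userspace check of that arithmetic (illustrative only, not part of the patch):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	char buf[32];

	/* hex: "0x" + 16 digits = 18 characters */
	printf("hex: %d\n", snprintf(buf, sizeof(buf), "0x%llx", ULLONG_MAX));
	/* dec: 20 digits */
	printf("dec: %d\n", snprintf(buf, sizeof(buf), "%llu", ULLONG_MAX));
	/* oct: "0" + 22 digits = 23 characters */
	printf("oct: %d\n", snprintf(buf, sizeof(buf), "0%llo", ULLONG_MAX));
	return 0;	/* the maximum is 23, so 24 bytes hold any of them plus the NUL */
}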
@@ -1914,7 +1914,7 @@ static unsigned long collect_longterm_unpinnable_pages(
 			drain_allow = false;
 		}
 
-		if (!folio_isolate_lru(folio))
+		if (folio_isolate_lru(folio))
 			continue;
 
 		list_add_tail(&folio->lru, movable_page_list);
......
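At this point folio_isolate_lru() still follows the 0-on-success / -EBUSY convention, so the old `if (!folio_isolate_lru(folio)) continue;` skipped exactly the folios that had just been isolated and they never reached movable_page_list. A toy userspace illustration of that inverted-check pitfall (hypothetical names, not kernel code):

#include <stdio.h>

/* 0 on success, -1 on failure, mirroring the int-returning convention used here. */
static int isolate(int id)
{
	return (id % 2) ? -1 : 0;	/* pretend odd ids cannot be isolated */
}

int main(void)
{
	int added = 0, id;

	for (id = 0; id < 4; id++) {
		if (isolate(id))	/* correct form: skip only the failures */
			continue;
		added++;		/* stands in for list_add_tail(&folio->lru, ...) */
	}
	printf("added %d of 4\n", added);	/* 2 successes; the inverted check would
						 * have collected the 2 failures instead */
	return 0;
}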
@@ -246,6 +246,9 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
 {
+	if (!kasan_arch_is_ready())
+		return false;
+
 	if (ptr != page_address(virt_to_head_page(ptr))) {
 		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
 		return true;
......
@@ -191,7 +191,12 @@ bool kasan_check_range(unsigned long addr, size_t size, bool write,
 bool kasan_byte_accessible(const void *addr)
 {
-	s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
+	s8 shadow_byte;
+
+	if (!kasan_arch_is_ready())
+		return true;
+
+	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
 
 	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
 }
......
@@ -291,6 +291,9 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 	unsigned long shadow_start, shadow_end;
 	int ret;
 
+	if (!kasan_arch_is_ready())
+		return 0;
+
 	if (!is_vmalloc_or_module_addr((void *)addr))
 		return 0;
@@ -459,6 +462,9 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 	unsigned long region_start, region_end;
 	unsigned long size;
 
+	if (!kasan_arch_is_ready())
+		return;
+
 	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
 	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
@@ -502,6 +508,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
 	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
 	 */
+	if (!kasan_arch_is_ready())
+		return (void *)start;
+
 	if (!is_vmalloc_or_module_addr(start))
 		return (void *)start;
@@ -524,6 +533,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
  */
 void __kasan_poison_vmalloc(const void *start, unsigned long size)
 {
+	if (!kasan_arch_is_ready())
+		return;
+
 	if (!is_vmalloc_or_module_addr(start))
 		return;
......
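All of these hunks apply the same guard: on architectures where KASAN initializes late, every entry point must return a safe default until kasan_arch_is_ready() reports that the shadow memory actually exists. A userspace sketch of that readiness-gate pattern (hypothetical names and shadow layout, not kernel code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool shadow_ready;		/* set once the shadow map has been populated */
static unsigned char shadow[1024];

static bool byte_accessible(size_t idx)
{
	if (!shadow_ready)		/* not ready: report "accessible" and never */
		return true;		/* touch shadow state that does not exist yet */
	return shadow[idx] == 0;
}

static void poison_range(size_t start, size_t len)
{
	if (!shadow_ready)		/* not ready: silently do nothing */
		return;
	for (size_t i = start; i < start + len; i++)
		shadow[i] = 0xF1;
}

int main(void)
{
	printf("before init: %d\n", byte_accessible(3));	/* 1 */
	poison_range(0, 8);					/* no-op */

	shadow_ready = true;					/* late init done */
	poison_range(0, 8);
	printf("after init:  %d\n", byte_accessible(3));	/* 0 */
	return 0;
}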
@@ -2629,8 +2629,11 @@ struct page *ksm_might_need_to_copy(struct page *page,
 		new_page = NULL;
 	}
 	if (new_page) {
-		copy_user_highpage(new_page, page, address, vma);
+		if (copy_mc_user_highpage(new_page, page, address, vma)) {
+			put_page(new_page);
+			memory_failure_queue(page_to_pfn(page), 0);
+			return ERR_PTR(-EHWPOISON);
+		}
 		SetPageDirty(new_page);
 		__SetPageUptodate(new_page);
 		__SetPageLocked(new_page);
......
@@ -3840,6 +3840,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 			if (unlikely(!page)) {
 				ret = VM_FAULT_OOM;
 				goto out_page;
+			} else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+				ret = VM_FAULT_HWPOISON;
+				goto out_page;
 			}
 			folio = page_folio(page);
......
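ksm_might_need_to_copy() now reports a failed machine-check-safe copy as ERR_PTR(-EHWPOISON), and each caller decodes that and picks its own recovery: VM_FAULT_HWPOISON here in do_swap_page(), and a hwpoison swap entry in unuse_pte() below. A userspace sketch of that pointer-encoded error flow, with ERR_PTR/PTR_ERR re-implemented locally rather than taken from kernel headers (the EHWPOISON value is an assumption for illustration):

#include <stdio.h>

#define EHWPOISON 133	/* assumed value; illustration only, not from kernel headers */

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

/* Stand-in for ksm_might_need_to_copy(): pretend the copy hit poisoned memory. */
static void *copy_page_might_fail(void *page)
{
	int copy_failed = 1;	/* simulates copy_mc_user_highpage() returning non-zero */

	if (copy_failed)
		return ERR_PTR(-EHWPOISON);
	return page;
}

int main(void)
{
	char fake_page[64];
	void *page = copy_page_might_fail(fake_page);

	if (!page)
		puts("no memory: caller picks VM_FAULT_OOM");
	else if (IS_ERR(page) && PTR_ERR(page) == -EHWPOISON)
		puts("poisoned source: caller picks VM_FAULT_HWPOISON");
	else
		puts("copy succeeded");
	return 0;
}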
@@ -246,18 +246,21 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
 }
 EXPORT_SYMBOL(shrinker_debugfs_rename);
 
-void shrinker_debugfs_remove(struct shrinker *shrinker)
+struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
 {
+	struct dentry *entry = shrinker->debugfs_entry;
+
 	lockdep_assert_held(&shrinker_rwsem);
 
 	kfree_const(shrinker->name);
 	shrinker->name = NULL;
 
-	if (!shrinker->debugfs_entry)
-		return;
-
-	debugfs_remove_recursive(shrinker->debugfs_entry);
-	ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
+	if (entry) {
+		ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
+		shrinker->debugfs_entry = NULL;
+	}
+
+	return entry;
 }
 
 static int __init shrinker_debugfs_init(void)
......
@@ -1764,12 +1764,15 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	struct page *swapcache;
 	spinlock_t *ptl;
 	pte_t *pte, new_pte;
+	bool hwposioned = false;
 	int ret = 1;
 
 	swapcache = page;
 	page = ksm_might_need_to_copy(page, vma, addr);
 	if (unlikely(!page))
 		return -ENOMEM;
+	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
+		hwposioned = true;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
@@ -1777,15 +1780,19 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		goto out;
 	}
 
-	if (unlikely(!PageUptodate(page))) {
-		pte_t pteval;
+	if (unlikely(hwposioned || !PageUptodate(page))) {
+		swp_entry_t swp_entry;
 
 		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
-		pteval = swp_entry_to_pte(make_swapin_error_entry());
-		set_pte_at(vma->vm_mm, addr, pte, pteval);
-		swap_free(entry);
+		if (hwposioned) {
+			swp_entry = make_hwpoison_entry(swapcache);
+			page = swapcache;
+		} else {
+			swp_entry = make_swapin_error_entry();
+		}
+		new_pte = swp_entry_to_pte(swp_entry);
 		ret = 0;
-		goto out;
+		goto setpte;
 	}
 
 	/* See do_swap_page() */
@@ -1817,6 +1824,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		new_pte = pte_mksoft_dirty(new_pte);
 	if (pte_swp_uffd_wp(*pte))
 		new_pte = pte_mkuffd_wp(new_pte);
+setpte:
 	set_pte_at(vma->vm_mm, addr, pte, new_pte);
 	swap_free(entry);
 out:
......
@@ -741,6 +741,8 @@ EXPORT_SYMBOL(register_shrinker);
  */
 void unregister_shrinker(struct shrinker *shrinker)
 {
+	struct dentry *debugfs_entry;
+
 	if (!(shrinker->flags & SHRINKER_REGISTERED))
 		return;
@@ -749,9 +751,11 @@ void unregister_shrinker(struct shrinker *shrinker)
 	shrinker->flags &= ~SHRINKER_REGISTERED;
 	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
 		unregister_memcg_shrinker(shrinker);
-	shrinker_debugfs_remove(shrinker);
+	debugfs_entry = shrinker_debugfs_remove(shrinker);
 	up_write(&shrinker_rwsem);
 
+	debugfs_remove_recursive(debugfs_entry);
+
 	kfree(shrinker->nr_deferred);
 	shrinker->nr_deferred = NULL;
 }
......
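The deadlock fix splits teardown in two: shrinker_debugfs_remove() only detaches the dentry while shrinker_rwsem is held, and the blocking debugfs_remove_recursive() runs after up_write(), so it can no longer wait on a debugfs reader that is itself waiting for the semaphore. A compact userspace sketch of that detach-under-lock / tear-down-after-unlock shape (pthreads, hypothetical names):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_rwlock_t registry_lock = PTHREAD_RWLOCK_INITIALIZER;
static char *registered_entry;		/* stands in for shrinker->debugfs_entry */

/* Analogue of shrinker_debugfs_remove(): detach under the lock, return the handle. */
static char *unregister_entry(void)
{
	char *entry;

	pthread_rwlock_wrlock(&registry_lock);
	entry = registered_entry;
	registered_entry = NULL;
	pthread_rwlock_unlock(&registry_lock);	/* like up_write(&shrinker_rwsem) */

	return entry;
}

int main(void)
{
	char *entry;

	registered_entry = strdup("debugfs-dir");
	entry = unregister_entry();
	free(entry);	/* the blocking teardown (debugfs_remove_recursive() above)
			 * now runs without the lock held, so it cannot deadlock
			 * against a reader that needs the lock */
	return 0;
}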
@@ -163,7 +163,7 @@ def get_current_task(cpu):
     task_ptr_type = task_type.get_type().pointer()
 
     if utils.is_target_arch("x86"):
-        var_ptr = gdb.parse_and_eval("&current_task")
+        var_ptr = gdb.parse_and_eval("&pcpu_hot.current_task")
         return per_cpu(var_ptr, cpu).dereference()
     elif utils.is_target_arch("aarch64"):
         current_task_addr = gdb.parse_and_eval("$SP_EL0")
......