Commit b6305049 authored by Mike Kravetz's avatar Mike Kravetz Committed by Andrew Morton

ipc/shm: call underlying open/close vm_ops

Shared memory segments can be created that are backed by hugetlb pages. 
When this happens, the vmas associated with any mappings (shmat) are
marked VM_HUGETLB, yet the vm_ops for such mappings are provided by
ipc/shm (shm_vm_ops).  There is a mechanism to call the underlying hugetlb
vm_ops, and this is done for most operations.  However, it is not done for
open and close.

This was not an issue until the introduction of the hugetlb vma_lock. 
This lock structure is pointed to by vm_private_data and the open/close
vm_ops help maintain this structure.  The special hugetlb routine called
at fork took care of structure updates at fork time.  However,
vma_splitting is not properly handled for ipc shared memory mappings
backed by hugetlb pages.  This can result in a "kernel NULL pointer
dereference" BUG or use after free as two vmas point to the same lock
structure.

Update the shm open and close routines to always call the underlying open
and close routines.

Link: https://lkml.kernel.org/r/20221114210018.49346-1-mike.kravetz@oracle.com
Fixes: 8d9bfb26 ("hugetlb: add vma based lock for pmd sharing")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reported-by: Doug Nelson <doug.nelson@intel.com>
Reported-by: <syzbot+83b4134621b7c326d950@syzkaller.appspotmail.com>
Cc: Alexander Mikhalitsyn <alexander.mikhalitsyn@virtuozzo.com>
Cc: "Eric W . Biederman" <ebiederm@xmission.com>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a6f810ef
...@@ -275,10 +275,8 @@ static inline void shm_rmid(struct shmid_kernel *s) ...@@ -275,10 +275,8 @@ static inline void shm_rmid(struct shmid_kernel *s)
} }
static int __shm_open(struct vm_area_struct *vma) static int __shm_open(struct shm_file_data *sfd)
{ {
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp; struct shmid_kernel *shp;
shp = shm_lock(sfd->ns, sfd->id); shp = shm_lock(sfd->ns, sfd->id);
...@@ -302,7 +300,15 @@ static int __shm_open(struct vm_area_struct *vma) ...@@ -302,7 +300,15 @@ static int __shm_open(struct vm_area_struct *vma)
/* This is called by fork, once for every shm attach. */ /* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma) static void shm_open(struct vm_area_struct *vma)
{ {
int err = __shm_open(vma); struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
int err;
/* Always call underlying open if present */
if (sfd->vm_ops->open)
sfd->vm_ops->open(vma);
err = __shm_open(sfd);
/* /*
* We raced in the idr lookup or with shm_destroy(). * We raced in the idr lookup or with shm_destroy().
* Either way, the ID is busted. * Either way, the ID is busted.
...@@ -359,10 +365,8 @@ static bool shm_may_destroy(struct shmid_kernel *shp) ...@@ -359,10 +365,8 @@ static bool shm_may_destroy(struct shmid_kernel *shp)
* The descriptor has already been removed from the current->mm->mmap list * The descriptor has already been removed from the current->mm->mmap list
* and will later be kfree()d. * and will later be kfree()d.
*/ */
static void shm_close(struct vm_area_struct *vma) static void __shm_close(struct shm_file_data *sfd)
{ {
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp; struct shmid_kernel *shp;
struct ipc_namespace *ns = sfd->ns; struct ipc_namespace *ns = sfd->ns;
...@@ -388,6 +392,18 @@ static void shm_close(struct vm_area_struct *vma) ...@@ -388,6 +392,18 @@ static void shm_close(struct vm_area_struct *vma)
up_write(&shm_ids(ns).rwsem); up_write(&shm_ids(ns).rwsem);
} }
/*
 * vm_ops->close handler for shm mappings.
 *
 * Per this commit's rationale: the underlying close handler (e.g. hugetlb's,
 * which maintains the vma_lock in vm_private_data) must be invoked before
 * the shm-level detach, otherwise vma splits can leave two vmas pointing at
 * the same lock structure (NULL deref / use-after-free).
 */
static void shm_close(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
/* Always call underlying close if present */
if (sfd->vm_ops->close)
sfd->vm_ops->close(vma);
/* Then perform the shm-level detach bookkeeping */
__shm_close(sfd);
}
/* Called with ns->shm_ids(ns).rwsem locked */ /* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data) static int shm_try_destroy_orphaned(int id, void *p, void *data)
{ {
...@@ -583,13 +599,13 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -583,13 +599,13 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
* IPC ID that was removed, and possibly even reused by another shm * IPC ID that was removed, and possibly even reused by another shm
* segment already. Propagate this case as an error to caller. * segment already. Propagate this case as an error to caller.
*/ */
ret = __shm_open(vma); ret = __shm_open(sfd);
if (ret) if (ret)
return ret; return ret;
ret = call_mmap(sfd->file, vma); ret = call_mmap(sfd->file, vma);
if (ret) { if (ret) {
shm_close(vma); __shm_close(sfd);
return ret; return ret;
} }
sfd->vm_ops = vma->vm_ops; sfd->vm_ops = vma->vm_ops;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment