Commit 1f5eaa90 authored by Jan Kara

fanotify: Avoid lost events due to ENOMEM for unlimited queues

Fanotify queues of unlimited length are not expected to lose events.
Since these queues are used for system auditing and other security
related tasks, losing events can even have security implications.
Currently the allocation is small (32 bytes), so it cannot fail;
however, once we start accounting events in memcgs, the allocation can
start failing. So avoid losing events due to failure to allocate memory
by making event allocation use __GFP_NOFAIL.
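
For context, "unlimited" here means a group created with FAN_UNLIMITED_QUEUE,
which sets group->max_events to UINT_MAX (the condition the patch checks).
A minimal userspace sketch of such a listener, purely illustrative and not
part of this patch:

    /* Illustrative only: a listener whose queue is "unlimited", i.e. the
     * case whose event allocations gain __GFP_NOFAIL with this patch.
     * Requires CAP_SYS_ADMIN. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/fanotify.h>

    int main(void)
    {
            int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_UNLIMITED_QUEUE,
                                   O_RDONLY);
            if (fd < 0) {
                    perror("fanotify_init");
                    return 1;
            }
            /* fanotify_mark() and a read() loop would follow here */
            return 0;
    }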
Reviewed-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Jan Kara <jack@suse.cz>
parent f0c4a817
@@ -139,23 +139,32 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
 	return false;
 }
 
-struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
+struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
+						 struct inode *inode, u32 mask,
 						 const struct path *path)
 {
 	struct fanotify_event_info *event;
+	gfp_t gfp = GFP_KERNEL;
+
+	/*
+	 * For queues with unlimited length lost events are not expected and
+	 * can possibly have security implications. Avoid losing events when
+	 * memory is short.
+	 */
+	if (group->max_events == UINT_MAX)
+		gfp |= __GFP_NOFAIL;
 
 	if (fanotify_is_perm_event(mask)) {
 		struct fanotify_perm_event_info *pevent;
 
-		pevent = kmem_cache_alloc(fanotify_perm_event_cachep,
-					  GFP_KERNEL);
+		pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
 		if (!pevent)
 			return NULL;
 		event = &pevent->fae;
 		pevent->response = 0;
 		goto init;
 	}
-	event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
+	event = kmem_cache_alloc(fanotify_event_cachep, gfp);
 	if (!event)
 		return NULL;
 init: __maybe_unused
@@ -210,7 +219,7 @@ static int fanotify_handle_event(struct fsnotify_group *group,
 		return 0;
 	}
 
-	event = fanotify_alloc_event(inode, mask, data);
+	event = fanotify_alloc_event(group, inode, mask, data);
 	ret = -ENOMEM;
 	if (unlikely(!event))
 		goto finish;
@@ -52,5 +52,6 @@ static inline struct fanotify_event_info *FANOTIFY_E(struct fsnotify_event *fse)
 	return container_of(fse, struct fanotify_event_info, fse);
 }
 
-struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
+struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
+						 struct inode *inode, u32 mask,
 						 const struct path *path);
@@ -757,7 +757,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 	group->fanotify_data.user = user;
 	atomic_inc(&user->fanotify_listeners);
 
-	oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
+	oevent = fanotify_alloc_event(group, NULL, FS_Q_OVERFLOW, NULL);
 	if (unlikely(!oevent)) {
 		fd = -ENOMEM;
 		goto out_destroy_group;