Commit 313c4281 authored by Wedson Almeida Filho, committed by Miguel Ojeda

rust: add basic `Task`

It is an abstraction for C's `struct task_struct`. It implements
`AlwaysRefCounted`, so the refcount of the wrapped object is managed
safely on the Rust side.

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Martin Rodriguez Reboredo <yakoyoku@gmail.com>
Signed-off-by: Wedson Almeida Filho <walmeida@microsoft.com>
Link: https://lore.kernel.org/r/20230411054543.21278-9-wedsonaf@gmail.com
Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
parent f1fbd6a8
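
A hedged usage sketch, not part of this patch: because `Task` implements `AlwaysRefCounted`, owned references can be held through the `ARef` smart pointer from `kernel::types`, so the `get_task_struct()`/`put_task_struct()` calls are issued automatically. The `keep_task()` function below is hypothetical and assumes a `&Task` borrow obtained elsewhere.

use kernel::task::Task;
use kernel::types::ARef;

/// Hypothetical helper: turns a borrowed task into an owned, ref-counted one.
fn keep_task(task: &Task) -> ARef<Task> {
    // Converting the borrow calls `AlwaysRefCounted::inc_ref()`, which lands in
    // the `get_task_struct()` helper added by this patch.
    let owned: ARef<Task> = task.into();

    // When `owned` (and any clones of it) are dropped, `dec_ref()` runs and
    // calls `put_task_struct()`, keeping the refcount balanced.
    owned
}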
rust/bindings/bindings_helper.h
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/refcount.h>
+#include <linux/sched.h>

 /* `bindgen` gets confused at certain things. */
 const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL;
...
rust/helpers.c
@@ -24,6 +24,7 @@
 #include <linux/refcount.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
+#include <linux/sched/signal.h>

 __noreturn void rust_helper_BUG(void)
 {
@@ -60,6 +61,12 @@ void rust_helper_spin_unlock(spinlock_t *lock)
 }
 EXPORT_SYMBOL_GPL(rust_helper_spin_unlock);

+int rust_helper_signal_pending(struct task_struct *t)
+{
+	return signal_pending(t);
+}
+EXPORT_SYMBOL_GPL(rust_helper_signal_pending);
+
 refcount_t rust_helper_REFCOUNT_INIT(int n)
 {
 	return (refcount_t)REFCOUNT_INIT(n);
@@ -96,6 +103,18 @@ long rust_helper_PTR_ERR(__force const void *ptr)
 }
 EXPORT_SYMBOL_GPL(rust_helper_PTR_ERR);

+void rust_helper_get_task_struct(struct task_struct *t)
+{
+	get_task_struct(t);
+}
+EXPORT_SYMBOL_GPL(rust_helper_get_task_struct);
+
+void rust_helper_put_task_struct(struct task_struct *t)
+{
+	put_task_struct(t);
+}
+EXPORT_SYMBOL_GPL(rust_helper_put_task_struct);
+
 /*
  * We use `bindgen`'s `--size_t-is-usize` option to bind the C `size_t` type
  * as the Rust `usize` type, so we can use it in contexts where Rust
...
rust/kernel/lib.rs
@@ -44,6 +44,7 @@
 pub mod std_vendor;
 pub mod str;
 pub mod sync;
+pub mod task;
 pub mod types;

 #[doc(hidden)]
...
rust/kernel/task.rs (new file)

// SPDX-License-Identifier: GPL-2.0

//! Tasks (threads and processes).
//!
//! C header: [`include/linux/sched.h`](../../../../include/linux/sched.h).

use crate::{bindings, types::Opaque};
use core::ptr;

/// Wraps the kernel's `struct task_struct`.
///
/// # Invariants
///
/// All instances are valid tasks created by the C portion of the kernel.
///
/// Instances of this type are always ref-counted, that is, a call to `get_task_struct` ensures
/// that the allocation remains valid at least until the matching call to `put_task_struct`.
#[repr(transparent)]
pub struct Task(pub(crate) Opaque<bindings::task_struct>);

// SAFETY: It's OK to access `Task` through references from other threads because we're either
// accessing properties that don't change (e.g., `pid`, `group_leader`) or that are properly
// synchronised by C code (e.g., `signal_pending`).
unsafe impl Sync for Task {}

/// The type of process identifiers (PIDs).
type Pid = bindings::pid_t;

impl Task {
    /// Returns the group leader of the given task.
    pub fn group_leader(&self) -> &Task {
        // SAFETY: By the type invariant, we know that `self.0` is a valid task. Valid tasks always
        // have a valid `group_leader`.
        let ptr = unsafe { *ptr::addr_of!((*self.0.get()).group_leader) };

        // SAFETY: The lifetime of the returned task reference is tied to the lifetime of `self`,
        // and given that a task has a reference to its group leader, we know it must be valid for
        // the lifetime of the returned task reference.
        unsafe { &*ptr.cast() }
    }

    /// Returns the PID of the given task.
    pub fn pid(&self) -> Pid {
        // SAFETY: By the type invariant, we know that `self.0` is a valid task. Valid tasks always
        // have a valid pid.
        unsafe { *ptr::addr_of!((*self.0.get()).pid) }
    }

    /// Determines whether the given task has pending signals.
    pub fn signal_pending(&self) -> bool {
        // SAFETY: By the type invariant, we know that `self.0` is valid.
        unsafe { bindings::signal_pending(self.0.get()) != 0 }
    }

    /// Wakes up the task.
    pub fn wake_up(&self) {
        // SAFETY: By the type invariant, we know that `self.0.get()` is non-null and valid.
        // And `wake_up_process` is safe to be called for any valid task, even if the task is
        // running.
        unsafe { bindings::wake_up_process(self.0.get()) };
    }
}

// SAFETY: The type invariants guarantee that `Task` is always ref-counted.
unsafe impl crate::types::AlwaysRefCounted for Task {
    fn inc_ref(&self) {
        // SAFETY: The existence of a shared reference means that the refcount is nonzero.
        unsafe { bindings::get_task_struct(self.0.get()) };
    }

    unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
        // SAFETY: The safety requirements guarantee that the refcount is nonzero.
        unsafe { bindings::put_task_struct(obj.cast().as_ptr()) }
    }
}
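
As an illustration only, not part of this commit: a hedged sketch of how other kernel Rust code might exercise the new `Task` methods. The `report()` function is hypothetical, the `&Task` argument is assumed to come from elsewhere in the kernel crate, and `pr_info!` is taken from the kernel prelude.

use kernel::prelude::*;
use kernel::task::Task;

/// Hypothetical helper exercising the accessors added by this patch.
fn report(task: &Task) {
    // `pid()` and `group_leader()` read fields the SAFETY comment above treats as unchanging.
    pr_info!("pid={} leader pid={}\n", task.pid(), task.group_leader().pid());

    // `signal_pending()` calls through the `rust_helper_signal_pending()` C helper.
    if task.signal_pending() {
        // `wake_up()` wraps `wake_up_process()`, which is safe even if the task is running.
        task.wake_up();
    }
}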