Commit 6240c2c4 authored by Thomas Hellström

drm/xe: Document nested struct members according to guidelines

Document nested struct members with full names as described in
Documentation/doc-guide/kernel-doc.rst.

For this documentation we allow a column width of 100 to make
it more readable.

This fixes warnings similar to:
drivers/gpu/drm/xe/xe_lrc_types.h:45: warning: Excess struct member 'size' description in 'xe_lrc'

v2:
- Only change the documentation, not the member.

v3:
- Fix the commit message wording.

Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240123153147.27305-1-thomas.hellstrom@linux.intel.com
parent 02c4e64a
@@ -109,9 +109,9 @@ struct xe_exec_queue {
* @persistent: persistent exec queue state
*/
struct {
/** @xef: file which this exec queue belongs to */
/** @persistent.xef: file which this exec queue belongs to */
struct xe_file *xef;
/** @link: link in list of persistent exec queues */
/** @persistent.link: link in list of persistent exec queues */
struct list_head link;
} persistent;
@@ -120,55 +120,55 @@ struct xe_exec_queue {
* @parallel: parallel submission state
*/
struct {
/** @composite_fence_ctx: context composite fence */
/** @parallel.composite_fence_ctx: context composite fence */
u64 composite_fence_ctx;
/** @composite_fence_seqno: seqno for composite fence */
/** @parallel.composite_fence_seqno: seqno for composite fence */
u32 composite_fence_seqno;
} parallel;
/**
* @bind: bind submission state
*/
struct {
/** @fence_ctx: context bind fence */
/** @bind.fence_ctx: context bind fence */
u64 fence_ctx;
/** @fence_seqno: seqno for bind fence */
/** @bind.fence_seqno: seqno for bind fence */
u32 fence_seqno;
} bind;
};
/** @sched_props: scheduling properties */
struct {
/** @timeslice_us: timeslice period in micro-seconds */
/** @sched_props.timeslice_us: timeslice period in micro-seconds */
u32 timeslice_us;
/** @preempt_timeout_us: preemption timeout in micro-seconds */
/** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
u32 preempt_timeout_us;
/** @job_timeout_ms: job timeout in milliseconds */
/** @sched_props.job_timeout_ms: job timeout in milliseconds */
u32 job_timeout_ms;
/** @priority: priority of this exec queue */
/** @sched_props.priority: priority of this exec queue */
enum xe_exec_queue_priority priority;
} sched_props;
/** @compute: compute exec queue state */
struct {
/** @pfence: preemption fence */
/** @compute.pfence: preemption fence */
struct dma_fence *pfence;
/** @context: preemption fence context */
/** @compute.context: preemption fence context */
u64 context;
/** @seqno: preemption fence seqno */
/** @compute.seqno: preemption fence seqno */
u32 seqno;
/** @link: link into VM's list of exec queues */
/** @compute.link: link into VM's list of exec queues */
struct list_head link;
/** @lock: preemption fences lock */
/** @compute.lock: preemption fences lock */
spinlock_t lock;
} compute;
/** @usm: unified shared memory state */
struct {
/** @acc_trigger: access counter trigger */
/** @usm.acc_trigger: access counter trigger */
u32 acc_trigger;
/** @acc_notify: access counter notify */
/** @usm.acc_notify: access counter notify */
u32 acc_notify;
/** @acc_granularity: access counter granularity */
/** @usm.acc_granularity: access counter granularity */
u32 acc_granularity;
} usm;
@@ -50,21 +50,21 @@ struct xe_gsc {
/** @proxy: sub-structure containing the SW proxy-related variables */
struct {
/** @component: struct for communication with mei component */
/** @proxy.component: struct for communication with mei component */
struct i915_gsc_proxy_component *component;
/** @mutex: protects the component binding and usage */
/** @proxy.mutex: protects the component binding and usage */
struct mutex mutex;
/** @component_added: whether the component has been added */
/** @proxy.component_added: whether the component has been added */
bool component_added;
/** @bo: object to store message to and from the GSC */
/** @proxy.bo: object to store message to and from the GSC */
struct xe_bo *bo;
/** @to_gsc: map of the memory used to send messages to the GSC */
/** @proxy.to_gsc: map of the memory used to send messages to the GSC */
struct iosys_map to_gsc;
/** @from_gsc: map of the memory used to recv messages from the GSC */
/** @proxy.from_gsc: map of the memory used to recv messages from the GSC */
struct iosys_map from_gsc;
/** @to_csme: pointer to the memory used to send messages to CSME */
/** @proxy.to_csme: pointer to the memory used to send messages to CSME */
void *to_csme;
/** @from_csme: pointer to the memory used to recv messages from CSME */
/** @proxy.from_csme: pointer to the memory used to recv messages from CSME */
void *from_csme;
} proxy;
};
@@ -103,16 +103,16 @@ struct xe_gt {
/** @info: GT info */
struct {
/** @type: type of GT */
/** @info.type: type of GT */
enum xe_gt_type type;
/** @id: Unique ID of this GT within the PCI Device */
/** @info.id: Unique ID of this GT within the PCI Device */
u8 id;
/** @reference_clock: clock frequency */
/** @info.reference_clock: clock frequency */
u32 reference_clock;
/** @engine_mask: mask of engines present on GT */
/** @info.engine_mask: mask of engines present on GT */
u64 engine_mask;
/**
* @__engine_mask: mask of engines present on GT read from
* @info.__engine_mask: mask of engines present on GT read from
* xe_pci.c, used to fake reading the engine_mask from the
* hwconfig blob.
*/
@@ -125,14 +125,14 @@ struct xe_gt {
* specific offset, as well as their own forcewake handling.
*/
struct {
/** @fw: force wake for GT */
/** @mmio.fw: force wake for GT */
struct xe_force_wake fw;
/**
* @adj_limit: adjust MMIO address if address is below this
* @mmio.adj_limit: adjust MMIO address if address is below this
* value
*/
u32 adj_limit;
/** @adj_offset: offset to add to MMIO address when adjusting */
/** @mmio.adj_offset: offset to add to MMIO address when adjusting */
u32 adj_offset;
} mmio;
@@ -144,7 +144,7 @@ struct xe_gt {
/** @reset: state for GT resets */
struct {
/**
* @worker: work so GT resets can be done async, allowing the reset
* @reset.worker: work so GT resets can be done async, allowing the reset
* code to safely flush all code paths
*/
struct work_struct worker;
@@ -152,36 +152,37 @@ struct xe_gt {
/** @tlb_invalidation: TLB invalidation state */
struct {
/** @seqno: TLB invalidation seqno, protected by CT lock */
/** @tlb_invalidation.seqno: TLB invalidation seqno, protected by CT lock */
#define TLB_INVALIDATION_SEQNO_MAX 0x100000
int seqno;
/**
* @seqno_recv: last received TLB invalidation seqno, protected by CT lock
* @tlb_invalidation.seqno_recv: last received TLB invalidation seqno,
* protected by CT lock
*/
int seqno_recv;
/**
* @pending_fences: list of pending fences waiting TLB
* @tlb_invalidation.pending_fences: list of pending fences waiting TLB
* invalidations, protected by CT lock
*/
struct list_head pending_fences;
/**
* @pending_lock: protects @pending_fences and updating
* @seqno_recv.
* @tlb_invalidation.pending_lock: protects @tlb_invalidation.pending_fences
* and updating @tlb_invalidation.seqno_recv.
*/
spinlock_t pending_lock;
/**
* @fence_tdr: schedules a delayed call to
* @tlb_invalidation.fence_tdr: schedules a delayed call to
* xe_gt_tlb_fence_timeout after the timeout interval is over.
*/
struct delayed_work fence_tdr;
/** @fence_context: context for TLB invalidation fences */
/** @tlb_invalidation.fence_context: context for TLB invalidation fences */
u64 fence_context;
/**
* @fence_seqno: seqno for TLB invalidation fences, protected by
* @tlb_invalidation.fence_seqno: seqno for TLB invalidation fences, protected by
* tlb_invalidation.lock
*/
u32 fence_seqno;
/** @lock: protects TLB invalidation fences */
/** @tlb_invalidation.lock: protects TLB invalidation fences */
spinlock_t lock;
} tlb_invalidation;
@@ -196,7 +197,7 @@ struct xe_gt {
/** @usm: unified shared memory state */
struct {
/**
* @bb_pool: Pool from which batchbuffers, for USM operations
* @usm.bb_pool: Pool from which batchbuffers, for USM operations
* (e.g. migrations, fixing page tables), are allocated.
* Dedicated pool needed so USM operations do not get blocked
* behind any user operations which may have resulted in a
@@ -204,67 +205,67 @@ struct xe_gt {
*/
struct xe_sa_manager *bb_pool;
/**
* @reserved_bcs_instance: reserved BCS instance used for USM
* @usm.reserved_bcs_instance: reserved BCS instance used for USM
* operations (e.g. migrations, fixing page tables)
*/
u16 reserved_bcs_instance;
/** @pf_wq: page fault work queue, unbound, high priority */
/** @usm.pf_wq: page fault work queue, unbound, high priority */
struct workqueue_struct *pf_wq;
/** @acc_wq: access counter work queue, unbound, high priority */
/** @usm.acc_wq: access counter work queue, unbound, high priority */
struct workqueue_struct *acc_wq;
/**
* @pf_queue: Page fault queue used to sync faults so faults can
* @usm.pf_queue: Page fault queue used to sync faults so faults can
* be processed not under the GuC CT lock. The queue is sized so
* it can sync all possible faults (1 per physical engine).
* Multiple queues exist so page faults from different VMs can
* be processed in parallel.
*/
struct pf_queue {
/** @gt: back pointer to GT */
/** @usm.pf_queue.gt: back pointer to GT */
struct xe_gt *gt;
#define PF_QUEUE_NUM_DW 128
/** @data: data in the page fault queue */
/** @usm.pf_queue.data: data in the page fault queue */
u32 data[PF_QUEUE_NUM_DW];
/**
* @tail: tail pointer in DWs for page fault queue,
* @usm.pf_queue.tail: tail pointer in DWs for page fault queue,
* moved by worker which processes faults (consumer).
*/
u16 tail;
/**
* @head: head pointer in DWs for page fault queue,
* @usm.pf_queue.head: head pointer in DWs for page fault queue,
* moved by G2H handler (producer).
*/
u16 head;
/** @lock: protects page fault queue */
/** @usm.pf_queue.lock: protects page fault queue */
spinlock_t lock;
/** @worker: to process page faults */
/** @usm.pf_queue.worker: to process page faults */
struct work_struct worker;
#define NUM_PF_QUEUE 4
} pf_queue[NUM_PF_QUEUE];
/**
* @acc_queue: Same as page fault queue, cannot process access
* @usm.acc_queue: Same as page fault queue, cannot process access
* counters under CT lock.
*/
struct acc_queue {
/** @gt: back pointer to GT */
/** @usm.acc_queue.gt: back pointer to GT */
struct xe_gt *gt;
#define ACC_QUEUE_NUM_DW 128
/** @data: data in the access counter queue */
/** @usm.acc_queue.data: data in the access counter queue */
u32 data[ACC_QUEUE_NUM_DW];
/**
* @tail: tail pointer in DWs for access counter queue,
* @usm.acc_queue.tail: tail pointer in DWs for access counter queue,
* moved by worker which processes counters
* (consumer).
*/
u16 tail;
/**
* @head: head pointer in DWs for access counter queue,
* @usm.acc_queue.head: head pointer in DWs for access counter queue,
* moved by G2H handler (producer).
*/
u16 head;
/** @lock: protects access counter queue */
/** @usm.acc_queue.lock: protects access counter queue */
spinlock_t lock;
/** @worker: to process access counters */
/** @usm.acc_queue.worker: to process access counters */
struct work_struct worker;
#define NUM_ACC_QUEUE 4
} acc_queue[NUM_ACC_QUEUE];
@@ -301,7 +302,7 @@ struct xe_gt {
/** @pcode: GT's PCODE */
struct {
/** @lock: protecting GT's PCODE mailbox data */
/** @pcode.lock: protecting GT's PCODE mailbox data */
struct mutex lock;
} pcode;
@@ -313,32 +314,32 @@ struct xe_gt {
/** @mocs: info */
struct {
/** @uc_index: UC index */
/** @mocs.uc_index: UC index */
u8 uc_index;
/** @wb_index: WB index, only used on L3_CCS platforms */
/** @mocs.wb_index: WB index, only used on L3_CCS platforms */
u8 wb_index;
} mocs;
/** @fuse_topo: GT topology reported by fuse registers */
struct {
/** @g_dss_mask: dual-subslices usable by geometry */
/** @fuse_topo.g_dss_mask: dual-subslices usable by geometry */
xe_dss_mask_t g_dss_mask;
/** @c_dss_mask: dual-subslices usable by compute */
/** @fuse_topo.c_dss_mask: dual-subslices usable by compute */
xe_dss_mask_t c_dss_mask;
/** @eu_mask_per_dss: EU mask per DSS */
/** @fuse_topo.eu_mask_per_dss: EU mask per DSS */
xe_eu_mask_t eu_mask_per_dss;
} fuse_topo;
/** @steering: register steering for individual HW units */
struct {
/* @ranges: register ranges used for this steering type */
/** @steering.ranges: register ranges used for this steering type */
const struct xe_mmio_range *ranges;
/** @group_target: target to steer accesses to */
/** @steering.group_target: target to steer accesses to */
u16 group_target;
/** @instance_target: instance to steer accesses to */
/** @steering.instance_target: instance to steer accesses to */
u16 instance_target;
} steering[NUM_STEERING_TYPES];
@@ -350,13 +351,13 @@ struct xe_gt {
/** @wa_active: keep track of active workarounds */
struct {
/** @gt: bitmap with active GT workarounds */
/** @wa_active.gt: bitmap with active GT workarounds */
unsigned long *gt;
/** @engine: bitmap with active engine workarounds */
/** @wa_active.engine: bitmap with active engine workarounds */
unsigned long *engine;
/** @lrc: bitmap with active LRC workarounds */
/** @wa_active.lrc: bitmap with active LRC workarounds */
unsigned long *lrc;
/** @oob: bitmap with active OOB workarounds */
/** @wa_active.oob: bitmap with active OOB workarounds */
unsigned long *oob;
} wa_active;
};
@@ -87,9 +87,9 @@ struct xe_guc_ct {
spinlock_t fast_lock;
/** @ctbs: buffers for sending and receiving commands */
struct {
/** @send: Host to GuC (H2G, send) channel */
/** @ctbs.h2g: Host to GuC (H2G, send) channel */
struct guc_ctb h2g;
/** @recv: GuC to Host (G2H, receive) channel */
/** @ctbs.g2h: GuC to Host (G2H, receive) channel */
struct guc_ctb g2h;
} ctbs;
/** @g2h_outstanding: number of outstanding G2H */
@@ -102,9 +102,9 @@ struct xe_guc_submit_exec_queue_snapshot {
/** @sched_props: scheduling properties */
struct {
/** @timeslice_us: timeslice period in micro-seconds */
/** @sched_props.timeslice_us: timeslice period in micro-seconds */
u32 timeslice_us;
/** @preempt_timeout_us: preemption timeout in micro-seconds */
/** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
u32 preempt_timeout_us;
} sched_props;
@@ -118,11 +118,11 @@ struct xe_guc_submit_exec_queue_snapshot {
/** @guc: GuC Engine Snapshot */
struct {
/** @wqi_head: work queue item head */
/** @guc.wqi_head: work queue item head */
u32 wqi_head;
/** @wqi_tail: work queue item tail */
/** @guc.wqi_tail: work queue item tail */
u32 wqi_tail;
/** @id: GuC id for this exec_queue */
/** @guc.id: GuC id for this exec_queue */
u16 id;
} guc;
@@ -133,13 +133,13 @@ struct xe_guc_submit_exec_queue_snapshot {
bool parallel_execution;
/** @parallel: snapshot of the useful parallel scratch */
struct {
/** @wq_desc: Workqueue description */
/** @parallel.wq_desc: Workqueue description */
struct {
/** @head: Workqueue Head */
/** @parallel.wq_desc.head: Workqueue Head */
u32 head;
/** @tail: Workqueue Tail */
/** @parallel.wq_desc.tail: Workqueue Tail */
u32 tail;
/** @status: Workqueue Status */
/** @parallel.wq_desc.status: Workqueue Status */
u32 status;
} wq_desc;
/** @wq: Workqueue Items */
@@ -49,40 +49,40 @@ struct xe_guc {
struct xe_guc_db_mgr dbm;
/** @submission_state: GuC submission state */
struct {
/** @exec_queue_lookup: Lookup an xe_engine from guc_id */
/** @submission_state.exec_queue_lookup: Lookup an xe_engine from guc_id */
struct xarray exec_queue_lookup;
/** @guc_ids: used to allocate new guc_ids, single-lrc */
/** @submission_state.guc_ids: used to allocate new guc_ids, single-lrc */
struct ida guc_ids;
/** @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
/** @submission_state.guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
unsigned long *guc_ids_bitmap;
/** @stopped: submissions are stopped */
/** @submission_state.stopped: submissions are stopped */
atomic_t stopped;
/** @lock: protects submission state */
/** @submission_state.lock: protects submission state */
struct mutex lock;
/** @suspend: suspend fence state */
/** @submission_state.suspend: suspend fence state */
struct {
/** @lock: suspend fences lock */
/** @submission_state.suspend.lock: suspend fences lock */
spinlock_t lock;
/** @context: suspend fences context */
/** @submission_state.suspend.context: suspend fences context */
u64 context;
/** @seqno: suspend fences seqno */
/** @submission_state.suspend.seqno: suspend fences seqno */
u32 seqno;
} suspend;
#ifdef CONFIG_PROVE_LOCKING
#define NUM_SUBMIT_WQ 256
/** @submit_wq_pool: submission ordered workqueues pool */
/** @submission_state.submit_wq_pool: submission ordered workqueues pool */
struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
/** @submit_wq_idx: submission ordered workqueue index */
/** @submission_state.submit_wq_idx: submission ordered workqueue index */
int submit_wq_idx;
#endif
/** @enabled: submission is enabled */
/** @submission_state.enabled: submission is enabled */
bool enabled;
} submission_state;
/** @hwconfig: Hardware config state */
struct {
/** @bo: buffer object of the hardware config */
/** @hwconfig.bo: buffer object of the hardware config */
struct xe_bo *bo;
/** @size: size of the hardware config */
/** @hwconfig.size: size of the hardware config */
u32 size;
} hwconfig;
@@ -79,23 +79,23 @@ struct xe_hw_engine_class_intf {
* @defaults: default scheduling properties
*/
struct {
/** @set_job_timeout: Set job timeout in ms for engine */
/** @sched_props.job_timeout_ms: Set job timeout in ms for engine */
u32 job_timeout_ms;
/** @job_timeout_min: Min job timeout in ms for engine */
/** @sched_props.job_timeout_min: Min job timeout in ms for engine */
u32 job_timeout_min;
/** @job_timeout_max: Max job timeout in ms for engine */
/** @sched_props.job_timeout_max: Max job timeout in ms for engine */
u32 job_timeout_max;
/** @timeslice_us: timeslice period in micro-seconds */
/** @sched_props.timeslice_us: timeslice period in micro-seconds */
u32 timeslice_us;
/** @timeslice_min: min timeslice period in micro-seconds */
/** @sched_props.timeslice_min: min timeslice period in micro-seconds */
u32 timeslice_min;
/** @timeslice_max: max timeslice period in micro-seconds */
/** @sched_props.timeslice_max: max timeslice period in micro-seconds */
u32 timeslice_max;
/** @preempt_timeout_us: preemption timeout in micro-seconds */
/** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
u32 preempt_timeout_us;
/** @preempt_timeout_min: min preemption timeout in micro-seconds */
/** @sched_props.preempt_timeout_min: min preemption timeout in micro-seconds */
u32 preempt_timeout_min;
/** @preempt_timeout_max: max preemption timeout in micro-seconds */
/** @sched_props.preempt_timeout_max: max preemption timeout in micro-seconds */
u32 preempt_timeout_max;
} sched_props, defaults;
};
@@ -164,62 +164,62 @@ struct xe_hw_engine_snapshot {
u16 logical_instance;
/** @forcewake: Force Wake information snapshot */
struct {
/** @domain: force wake domain of this hw engine */
/** @forcewake.domain: force wake domain of this hw engine */
enum xe_force_wake_domains domain;
/** @ref: Forcewake ref for the above domain */
/** @forcewake.ref: Forcewake ref for the above domain */
int ref;
} forcewake;
/** @mmio_base: MMIO base address of this hw engine */
u32 mmio_base;
/** @reg: Useful MMIO register snapshot */
struct {
/** @ring_hwstam: RING_HWSTAM */
/** @reg.ring_hwstam: RING_HWSTAM */
u32 ring_hwstam;
/** @ring_hws_pga: RING_HWS_PGA */
/** @reg.ring_hws_pga: RING_HWS_PGA */
u32 ring_hws_pga;
/** @ring_execlist_status_lo: RING_EXECLIST_STATUS_LO */
/** @reg.ring_execlist_status_lo: RING_EXECLIST_STATUS_LO */
u32 ring_execlist_status_lo;
/** @ring_execlist_status_hi: RING_EXECLIST_STATUS_HI */
/** @reg.ring_execlist_status_hi: RING_EXECLIST_STATUS_HI */
u32 ring_execlist_status_hi;
/** @ring_execlist_sq_contents_lo: RING_EXECLIST_SQ_CONTENTS */
/** @reg.ring_execlist_sq_contents_lo: RING_EXECLIST_SQ_CONTENTS */
u32 ring_execlist_sq_contents_lo;
/** @ring_execlist_sq_contents_hi: RING_EXECLIST_SQ_CONTENTS + 4 */
/** @reg.ring_execlist_sq_contents_hi: RING_EXECLIST_SQ_CONTENTS + 4 */
u32 ring_execlist_sq_contents_hi;
/** @ring_start: RING_START */
/** @reg.ring_start: RING_START */
u32 ring_start;
/** @ring_head: RING_HEAD */
/** @reg.ring_head: RING_HEAD */
u32 ring_head;
/** @ring_tail: RING_TAIL */
/** @reg.ring_tail: RING_TAIL */
u32 ring_tail;
/** @ring_ctl: RING_CTL */
/** @reg.ring_ctl: RING_CTL */
u32 ring_ctl;
/** @ring_mi_mode: RING_MI_MODE */
/** @reg.ring_mi_mode: RING_MI_MODE */
u32 ring_mi_mode;
/** @ring_mode: RING_MODE */
/** @reg.ring_mode: RING_MODE */
u32 ring_mode;
/** @ring_imr: RING_IMR */
/** @reg.ring_imr: RING_IMR */
u32 ring_imr;
/** @ring_esr: RING_ESR */
/** @reg.ring_esr: RING_ESR */
u32 ring_esr;
/** @ring_emr: RING_EMR */
/** @reg.ring_emr: RING_EMR */
u32 ring_emr;
/** @ring_eir: RING_EIR */
/** @reg.ring_eir: RING_EIR */
u32 ring_eir;
/** @ring_acthd_udw: RING_ACTHD_UDW */
/** @reg.ring_acthd_udw: RING_ACTHD_UDW */
u32 ring_acthd_udw;
/** @ring_acthd: RING_ACTHD */
/** @reg.ring_acthd: RING_ACTHD */
u32 ring_acthd;
/** @ring_bbaddr_udw: RING_BBADDR_UDW */
/** @reg.ring_bbaddr_udw: RING_BBADDR_UDW */
u32 ring_bbaddr_udw;
/** @ring_bbaddr: RING_BBADDR */
/** @reg.ring_bbaddr: RING_BBADDR */
u32 ring_bbaddr;
/** @ring_dma_fadd_udw: RING_DMA_FADD_UDW */
/** @reg.ring_dma_fadd_udw: RING_DMA_FADD_UDW */
u32 ring_dma_fadd_udw;
/** @ring_dma_fadd: RING_DMA_FADD */
/** @reg.ring_dma_fadd: RING_DMA_FADD */
u32 ring_dma_fadd;
/** @ipehr: IPEHR */
/** @reg.ipehr: IPEHR */
u32 ipehr;
/** @rcu_mode: RCU_MODE */
/** @reg.rcu_mode: RCU_MODE */
u32 rcu_mode;
} reg;
};
@@ -28,11 +28,11 @@ struct xe_lrc {
/** @ring: submission ring state */
struct {
/** @size: size of submission ring */
/** @ring.size: size of submission ring */
u32 size;
/** @tail: tail of submission ring */
/** @ring.tail: tail of submission ring */
u32 tail;
/** @old_tail: shadow of tail */
/** @ring.old_tail: shadow of tail */
u32 old_tail;
} ring;
@@ -30,11 +30,11 @@ struct xe_sched_job {
struct dma_fence *fence;
/** @user_fence: write back value when BB is complete */
struct {
/** @used: user fence is used */
/** @user_fence.used: user fence is used */
bool used;
/** @addr: address to write to */
/** @user_fence.addr: address to write to */
u64 addr;
/** @value: write back value */
/** @user_fence.value: write back value */
u64 value;
} user_fence;
/** @migrate_flush_flags: Additional flush flags for migration jobs */
@@ -124,11 +124,14 @@ struct xe_uc_fw {
/** @versions: FW versions wanted and found */
struct {
/** @wanted: firmware version wanted by platform */
/** @versions.wanted: firmware version wanted by platform */
struct xe_uc_fw_version wanted;
/** @wanted_type: type of firmware version wanted (release vs compatibility) */
/**
* @versions.wanted_type: type of firmware version wanted
* (release vs compatibility)
*/
enum xe_uc_fw_version_types wanted_type;
/** @found: fw versions found in firmware blob */
/** @versions.found: fw versions found in firmware blob */
struct xe_uc_fw_version found[XE_UC_FW_VER_TYPE_COUNT];
} versions;
@@ -16,9 +16,9 @@ struct xe_wopcm {
u32 size;
/** @guc: GuC WOPCM Region info */
struct {
/** @base: GuC WOPCM base which is offset from WOPCM base */
/** @guc.base: GuC WOPCM base which is offset from WOPCM base */
u32 base;
/** @size: Size of the GuC WOPCM region */
/** @guc.size: Size of the GuC WOPCM region */
u32 size;
} guc;
};
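As a usage sketch (assuming a kernel tree checkout; the kernel-doc script's -none mode suppresses output and prints warnings only), the updated headers can be re-checked with, e.g.:

	$ scripts/kernel-doc -none drivers/gpu/drm/xe/xe_lrc_types.h

With the full nested-member names in place, the "Excess struct member" warnings are no longer emitted.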