Commit 848a4e5c authored by Luca Coelho, committed by Jani Nikula

drm/i915: add a dedicated workqueue inside drm_i915_private

In order to avoid flush_scheduled_work() usage, add a dedicated
workqueue to the drm_i915_private structure.  This way, we no longer
need to use the system workqueue.
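
For illustration, the conversion pattern looks roughly like the sketch
below. This is an illustrative sketch only, not verbatim driver code:
the example_* helper names are made up, while alloc_workqueue(),
queue_work(), flush_workqueue() and destroy_workqueue() are the
standard kernel workqueue API used by the patch.

  /* Illustrative sketch only -- not the exact driver code. */
  static int example_wq_init(struct drm_i915_private *i915)
  {
          /* one driver-private queue replaces the system_wq users */
          i915->unordered_wq = alloc_workqueue("i915-unordered", 0, 0);
          if (!i915->unordered_wq)
                  return -ENOMEM;
          return 0;
  }

  static void example_schedule(struct drm_i915_private *i915,
                               struct work_struct *work)
  {
          /* was: schedule_work(work) */
          queue_work(i915->unordered_wq, work);
  }

  static void example_wq_fini(struct drm_i915_private *i915)
  {
          /* was: flush_scheduled_work() on the global queue */
          flush_workqueue(i915->unordered_wq);
          destroy_workqueue(i915->unordered_wq);
  }

Delayed work moves the same way: schedule_delayed_work(&dw, delay)
becomes queue_delayed_work(i915->unordered_wq, &dw, delay).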

This change is mostly mechanical and based on Tetsuo's original
patch[1].

v6 by Jani:
- Also create unordered_wq for mock device

Link: https://patchwork.freedesktop.org/series/114608/ [1]
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/c816ebe17ef08d363981942a096a586a7658a65e.1686231190.git.jani.nikula@intel.com
parent 8d208a5e
@@ -7180,11 +7180,12 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
                 break;
         case FENCE_FREE:
                 {
+                        struct drm_i915_private *i915 = to_i915(state->base.dev);
                         struct intel_atomic_helper *helper =
-                                &to_i915(state->base.dev)->display.atomic_helper;
+                                &i915->display.atomic_helper;

                         if (llist_add(&state->freed, &helper->free_list))
-                                schedule_work(&helper->free_work);
+                                queue_work(i915->unordered_wq, &helper->free_work);
                         break;
                 }
         }
...
@@ -442,7 +442,7 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
         intel_unregister_dsm_handler();

         /* flush any delayed tasks or pending work */
-        flush_scheduled_work();
+        flush_workqueue(i915->unordered_wq);

         intel_hdcp_component_fini(i915);
...
@@ -1057,7 +1057,7 @@ void intel_dmc_init(struct drm_i915_private *i915)
         i915->display.dmc.dmc = dmc;

         drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path);
-        schedule_work(&dmc->work);
+        queue_work(i915->unordered_wq, &dmc->work);

         return;
...
@@ -5251,7 +5251,7 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
         spin_lock_irq(&i915->irq_lock);
         i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin);
         spin_unlock_irq(&i915->irq_lock);
-        queue_delayed_work(system_wq, &i915->display.hotplug.hotplug_work, 0);
+        queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0);
 }

 static const struct drm_connector_funcs intel_dp_connector_funcs = {
...
@@ -1064,6 +1064,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
                                                      const struct intel_crtc_state *crtc_state)
 {
         struct intel_connector *intel_connector = intel_dp->attached_connector;
+        struct drm_i915_private *i915 = dp_to_i915(intel_dp);

         if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
                 lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
@@ -1081,7 +1082,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
         }

         /* Schedule a Hotplug Uevent to userspace to start modeset */
-        schedule_work(&intel_connector->modeset_retry_work);
+        queue_work(i915->unordered_wq, &intel_connector->modeset_retry_work);
 }

 /* Perform the link training on all LTTPRs and the DPRX on a link. */
...
@@ -111,7 +111,9 @@ static void intel_drrs_set_state(struct intel_crtc *crtc,

 static void intel_drrs_schedule_work(struct intel_crtc *crtc)
 {
-        mod_delayed_work(system_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
+        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+        mod_delayed_work(i915->unordered_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
 }

 static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
...
@@ -1600,7 +1600,7 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
         if (READ_ONCE(fbc->underrun_detected))
                 return;

-        schedule_work(&fbc->underrun_work);
+        queue_work(fbc->i915->unordered_wq, &fbc->underrun_work);
 }

 /**
...
@@ -694,7 +694,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
                         /* Don't block our own workqueue as this can
                          * be run in parallel with other i915.ko tasks.
                          */
-                        schedule_work(&dev_priv->display.fbdev.suspend_work);
+                        queue_work(dev_priv->unordered_wq,
+                                   &dev_priv->display.fbdev.suspend_work);
                         return;
                 }
         }
...
@@ -983,6 +983,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
         struct drm_device *dev = connector->base.dev;
         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
         struct intel_hdcp *hdcp = &connector->hdcp;
+        struct drm_i915_private *i915 = to_i915(connector->base.dev);

         drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
@@ -1001,7 +1002,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
         hdcp->value = value;
         if (update_property) {
                 drm_connector_get(&connector->base);
-                schedule_work(&hdcp->prop_work);
+                queue_work(i915->unordered_wq, &hdcp->prop_work);
         }
 }
@@ -2090,16 +2091,17 @@ static void intel_hdcp_check_work(struct work_struct *work)
                                                struct intel_hdcp,
                                                check_work);
         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+        struct drm_i915_private *i915 = to_i915(connector->base.dev);

         if (drm_connector_is_unregistered(&connector->base))
                 return;

         if (!intel_hdcp2_check_link(connector))
-                schedule_delayed_work(&hdcp->check_work,
-                                      DRM_HDCP2_CHECK_PERIOD_MS);
+                queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+                                   DRM_HDCP2_CHECK_PERIOD_MS);
         else if (!intel_hdcp_check_link(connector))
-                schedule_delayed_work(&hdcp->check_work,
-                                      DRM_HDCP_CHECK_PERIOD_MS);
+                queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+                                   DRM_HDCP_CHECK_PERIOD_MS);
 }

 static int i915_hdcp_component_bind(struct device *i915_kdev,
@@ -2398,7 +2400,8 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
         }

         if (!ret) {
-                schedule_delayed_work(&hdcp->check_work, check_link_interval);
+                queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+                                   check_link_interval);
                 intel_hdcp_update_value(connector,
                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
                                         true);
@@ -2447,6 +2450,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
                 to_intel_connector(conn_state->connector);
         struct intel_hdcp *hdcp = &connector->hdcp;
         bool content_protection_type_changed, desired_and_not_enabled = false;
+        struct drm_i915_private *i915 = to_i915(connector->base.dev);

         if (!connector->hdcp.shim)
                 return;
@@ -2473,7 +2477,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
                 mutex_lock(&hdcp->mutex);
                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
                 drm_connector_get(&connector->base);
-                schedule_work(&hdcp->prop_work);
+                queue_work(i915->unordered_wq, &hdcp->prop_work);
                 mutex_unlock(&hdcp->mutex);
         }
@@ -2490,7 +2494,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
          */
         if (!desired_and_not_enabled && !content_protection_type_changed) {
                 drm_connector_get(&connector->base);
-                schedule_work(&hdcp->prop_work);
+                queue_work(i915->unordered_wq, &hdcp->prop_work);
         }
 }
@@ -2602,6 +2606,7 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
 {
         struct intel_hdcp *hdcp = &connector->hdcp;
+        struct drm_i915_private *i915 = to_i915(connector->base.dev);

         if (!hdcp->shim)
                 return;
@@ -2609,5 +2614,5 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
         atomic_inc(&connector->hdcp.cp_irq_count);
         wake_up_all(&connector->hdcp.cp_irq_queue);

-        schedule_delayed_work(&hdcp->check_work, 0);
+        queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
 }
@@ -212,7 +212,8 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
         /* Enable polling and queue hotplug re-enabling. */
         if (hpd_disabled) {
                 drm_kms_helper_poll_enable(&dev_priv->drm);
-                mod_delayed_work(system_wq, &dev_priv->display.hotplug.reenable_work,
+                mod_delayed_work(dev_priv->unordered_wq,
+                                 &dev_priv->display.hotplug.reenable_work,
                                  msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
         }
 }
@@ -339,7 +340,8 @@ static void i915_digport_work_func(struct work_struct *work)
                 spin_lock_irq(&dev_priv->irq_lock);
                 dev_priv->display.hotplug.event_bits |= old_bits;
                 spin_unlock_irq(&dev_priv->irq_lock);
-                queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
+                queue_delayed_work(dev_priv->unordered_wq,
+                                   &dev_priv->display.hotplug.hotplug_work, 0);
         }
 }
@@ -446,7 +448,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
                 dev_priv->display.hotplug.retry_bits |= retry;
                 spin_unlock_irq(&dev_priv->irq_lock);

-                mod_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work,
+                mod_delayed_work(dev_priv->unordered_wq,
+                                 &dev_priv->display.hotplug.hotplug_work,
                                  msecs_to_jiffies(HPD_RETRY_DELAY));
         }
 }
@@ -577,7 +580,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
         if (queue_dig)
                 queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
         if (queue_hp)
-                queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
+                queue_delayed_work(dev_priv->unordered_wq,
+                                   &dev_priv->display.hotplug.hotplug_work, 0);
 }

 /**
@@ -687,7 +691,8 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
          * As well, there's no issue if we race here since we always reschedule
          * this worker anyway
          */
-        schedule_work(&dev_priv->display.hotplug.poll_init_work);
+        queue_work(dev_priv->unordered_wq,
+                   &dev_priv->display.hotplug.poll_init_work);
 }

 /**
@@ -715,7 +720,8 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
                 return;

         WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
-        schedule_work(&dev_priv->display.hotplug.poll_init_work);
+        queue_work(dev_priv->unordered_wq,
+                   &dev_priv->display.hotplug.poll_init_work);
 }

 void intel_hpd_init_early(struct drm_i915_private *i915)
...
@@ -635,7 +635,8 @@ static void asle_work(struct work_struct *work)
 void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
 {
         if (dev_priv->display.opregion.asle)
-                schedule_work(&dev_priv->display.opregion.asle_work);
+                queue_work(dev_priv->unordered_wq,
+                           &dev_priv->display.opregion.asle_work);
 }

 #define ACPI_EV_DISPLAY_SWITCH (1<<0)
...
@@ -867,6 +867,7 @@ static void edp_panel_vdd_work(struct work_struct *__work)

 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
 {
+        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
         unsigned long delay;

         /*
@@ -882,7 +883,8 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
          * operations.
          */
         delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
-        schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
+        queue_delayed_work(i915->unordered_wq,
+                           &intel_dp->pps.panel_vdd_work, delay);
 }

 /*
...
@@ -341,7 +341,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
                  */
                 intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));

-                schedule_work(&intel_dp->psr.work);
+                queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
         }
 }
@@ -2440,6 +2440,8 @@ static void
 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
                        enum fb_op_origin origin)
 {
+        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
         if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
             !intel_dp->psr.active)
                 return;
@@ -2453,7 +2455,7 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
                 return;

         tgl_psr2_enable_dc3co(intel_dp);
-        mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
+        mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
                          intel_dp->psr.dc3co_exit_delay);
 }
@@ -2493,7 +2495,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
                         psr_force_hw_tracking_exit(intel_dp);

                         if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
-                                schedule_work(&intel_dp->psr.work);
+                                queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
                 }
         }
...
@@ -2327,6 +2327,7 @@ static u32 active_ccid(struct intel_engine_cs *engine)

 static void execlists_capture(struct intel_engine_cs *engine)
 {
+        struct drm_i915_private *i915 = engine->i915;
         struct execlists_capture *cap;

         if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
@@ -2375,7 +2376,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
                 goto err_rq;

         INIT_WORK(&cap->work, execlists_capture_work);
-        schedule_work(&cap->work);
+        queue_work(i915->unordered_wq, &cap->work);
         return;

 err_rq:
@@ -3680,7 +3681,7 @@ static void virtual_context_destroy(struct kref *kref)
          * lock, we can delegate the free of the engine to an RCU worker.
          */
         INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
-        queue_rcu_work(system_wq, &ve->rcu);
+        queue_rcu_work(ve->context.engine->i915->unordered_wq, &ve->rcu);
 }

 static void virtual_engine_initial_hint(struct virtual_engine *ve)
...
@@ -88,10 +88,11 @@ static void pool_free_work(struct work_struct *wrk)
 {
         struct intel_gt_buffer_pool *pool =
                 container_of(wrk, typeof(*pool), work.work);
+        struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);

         if (pool_free_older_than(pool, HZ))
-                schedule_delayed_work(&pool->work,
-                                      round_jiffies_up_relative(HZ));
+                queue_delayed_work(gt->i915->unordered_wq, &pool->work,
+                                   round_jiffies_up_relative(HZ));
 }

 static void pool_retire(struct i915_active *ref)
@@ -99,6 +100,7 @@ static void pool_retire(struct i915_active *ref)
         struct intel_gt_buffer_pool_node *node =
                 container_of(ref, typeof(*node), active);
         struct intel_gt_buffer_pool *pool = node->pool;
+        struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
         struct list_head *list = bucket_for_size(pool, node->obj->base.size);
         unsigned long flags;
@@ -116,8 +118,8 @@ static void pool_retire(struct i915_active *ref)
         WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
         spin_unlock_irqrestore(&pool->lock, flags);

-        schedule_delayed_work(&pool->work,
-                              round_jiffies_up_relative(HZ));
+        queue_delayed_work(gt->i915->unordered_wq, &pool->work,
+                           round_jiffies_up_relative(HZ));
 }

 void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
...
@@ -376,7 +376,7 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                 gt->i915->l3_parity.which_slice |= 1 << 0;

-        schedule_work(&gt->i915->l3_parity.error_work);
+        queue_work(gt->i915->unordered_wq, &gt->i915->l3_parity.error_work);
 }

 void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
...
@@ -116,7 +116,7 @@ void intel_engine_add_retire(struct intel_engine_cs *engine,
         GEM_BUG_ON(intel_engine_is_virtual(engine));

         if (add_retire(engine, tl))
-                schedule_work(&engine->retire_work);
+                queue_work(engine->i915->unordered_wq, &engine->retire_work);
 }

 void intel_engine_init_retire(struct intel_engine_cs *engine)
@@ -207,8 +207,8 @@ static void retire_work_handler(struct work_struct *work)
         struct intel_gt *gt =
                 container_of(work, typeof(*gt), requests.retire_work.work);

-        schedule_delayed_work(&gt->requests.retire_work,
-                              round_jiffies_up_relative(HZ));
+        queue_delayed_work(gt->i915->unordered_wq, &gt->requests.retire_work,
+                           round_jiffies_up_relative(HZ));
         intel_gt_retire_requests(gt);
 }
@@ -224,8 +224,8 @@ void intel_gt_park_requests(struct intel_gt *gt)
 void intel_gt_unpark_requests(struct intel_gt *gt)
 {
-        schedule_delayed_work(&gt->requests.retire_work,
-                              round_jiffies_up_relative(HZ));
+        queue_delayed_work(gt->i915->unordered_wq, &gt->requests.retire_work,
+                           round_jiffies_up_relative(HZ));
 }

 void intel_gt_fini_requests(struct intel_gt *gt)
...
@@ -1625,7 +1625,7 @@ void __intel_init_wedge(struct intel_wedge_me *w,
         w->name = name;

         INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
-        schedule_delayed_work(&w->work, timeout);
+        queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout);
 }

 void __intel_fini_wedge(struct intel_wedge_me *w)
...
@@ -73,13 +73,14 @@ static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
 static void rps_timer(struct timer_list *t)
 {
         struct intel_rps *rps = from_timer(rps, t, timer);
+        struct intel_gt *gt = rps_to_gt(rps);
         struct intel_engine_cs *engine;
         ktime_t dt, last, timestamp;
         enum intel_engine_id id;
         s64 max_busy[3] = {};

         timestamp = 0;
-        for_each_engine(engine, rps_to_gt(rps), id) {
+        for_each_engine(engine, gt, id) {
                 s64 busy;
                 int i;
@@ -123,7 +124,7 @@ static void rps_timer(struct timer_list *t)
                         busy += div_u64(max_busy[i], 1 << i);
                 }
-                GT_TRACE(rps_to_gt(rps),
+                GT_TRACE(gt,
                          "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
                          busy, (int)div64_u64(100 * busy, dt),
                          max_busy[0], max_busy[1], max_busy[2],
@@ -133,12 +134,12 @@ static void rps_timer(struct timer_list *t)
                     rps->cur_freq < rps->max_freq_softlimit) {
                         rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
                         rps->pm_interval = 1;
-                        schedule_work(&rps->work);
+                        queue_work(gt->i915->unordered_wq, &rps->work);
                 } else if (100 * busy < rps->power.down_threshold * dt &&
                            rps->cur_freq > rps->min_freq_softlimit) {
                         rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
                         rps->pm_interval = 1;
-                        schedule_work(&rps->work);
+                        queue_work(gt->i915->unordered_wq, &rps->work);
                 } else {
                         rps->last_adj = 0;
                 }
@@ -973,7 +974,7 @@ static int rps_set_boost_freq(struct intel_rps *rps, u32 val)
         }
         mutex_unlock(&rps->lock);
         if (boost)
-                schedule_work(&rps->work);
+                queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);

         return 0;
 }
@@ -1025,7 +1026,8 @@ void intel_rps_boost(struct i915_request *rq)
                 if (!atomic_fetch_inc(&slpc->num_waiters)) {
                         GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
                                  rq->fence.context, rq->fence.seqno);
-                        schedule_work(&slpc->boost_work);
+                        queue_work(rps_to_gt(rps)->i915->unordered_wq,
+                                   &slpc->boost_work);
                 }

                 return;
@@ -1041,7 +1043,7 @@ void intel_rps_boost(struct i915_request *rq)
                  rq->fence.context, rq->fence.seqno);

         if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
-                schedule_work(&rps->work);
+                queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);

         WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
 }
@@ -1900,7 +1902,7 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
         gen6_gt_pm_mask_irq(gt, events);

         rps->pm_iir |= events;
-        schedule_work(&rps->work);
+        queue_work(gt->i915->unordered_wq, &rps->work);
 }

 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
@@ -1917,7 +1919,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
                 gen6_gt_pm_mask_irq(gt, events);

                 rps->pm_iir |= events;
-                schedule_work(&rps->work);
+                queue_work(gt->i915->unordered_wq, &rps->work);
                 spin_unlock(gt->irq_lock);
         }
 }
...
@@ -27,7 +27,7 @@ static void perf_begin(struct intel_gt *gt)

         /* Boost gpufreq to max [waitboost] and keep it fixed */
         atomic_inc(&gt->rps.num_waiters);
-        schedule_work(&gt->rps.work);
+        queue_work(gt->i915->unordered_wq, &gt->rps.work);
         flush_work(&gt->rps.work);
 }
...
@@ -132,8 +132,20 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
         if (dev_priv->display.hotplug.dp_wq == NULL)
                 goto out_free_wq;

+        /*
+         * The unordered i915 workqueue should be used for all work
+         * scheduling that do not require running in order, which used
+         * to be scheduled on the system_wq before moving to a driver
+         * instance due deprecation of flush_scheduled_work().
+         */
+        dev_priv->unordered_wq = alloc_workqueue("i915-unordered", 0, 0);
+        if (dev_priv->unordered_wq == NULL)
+                goto out_free_dp_wq;
+
         return 0;

+out_free_dp_wq:
+        destroy_workqueue(dev_priv->display.hotplug.dp_wq);
 out_free_wq:
         destroy_workqueue(dev_priv->wq);
 out_err:
@@ -144,6 +156,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)

 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
 {
+        destroy_workqueue(dev_priv->unordered_wq);
         destroy_workqueue(dev_priv->display.hotplug.dp_wq);
         destroy_workqueue(dev_priv->wq);
 }
...
@@ -260,6 +260,16 @@ struct drm_i915_private {
          */
         struct workqueue_struct *wq;

+        /**
+         * unordered_wq - internal workqueue for unordered work
+         *
+         * This workqueue should be used for all unordered work
+         * scheduling within i915, which used to be scheduled on the
+         * system_wq before moving to a driver instance due
+         * deprecation of flush_scheduled_work().
+         */
+        struct workqueue_struct *unordered_wq;
+
         /* pm private clock gating functions */
         const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
...
@@ -290,7 +290,7 @@ static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)

         if (!i915_request_completed(rq)) {
                 if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
-                        schedule_work(&gt->watchdog.work);
+                        queue_work(gt->i915->unordered_wq, &gt->watchdog.work);
         } else {
                 i915_request_put(rq);
         }
...
@@ -75,7 +75,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)

         /* Assume we are not in process context and so cannot sleep. */
         if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
-                mod_delayed_work(system_wq, &wf->work,
+                mod_delayed_work(wf->i915->unordered_wq, &wf->work,
                                  FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
                 return;
         }
...
@@ -69,6 +69,7 @@ static void mock_device_release(struct drm_device *dev)
         i915_gem_drain_workqueue(i915);
         mock_fini_ggtt(to_gt(i915)->ggtt);

+        destroy_workqueue(i915->unordered_wq);
         destroy_workqueue(i915->wq);

         intel_region_ttm_device_fini(i915);
@@ -208,6 +209,10 @@ struct drm_i915_private *mock_gem_device(void)
         if (!i915->wq)
                 goto err_drv;

+        i915->unordered_wq = alloc_workqueue("mock-unordered", 0, 0);
+        if (!i915->unordered_wq)
+                goto err_wq;
+
         mock_init_contexts(i915);

         /* allocate the ggtt */
@@ -239,6 +244,8 @@ struct drm_i915_private *mock_gem_device(void)
 err_context:
         intel_gt_driver_remove(to_gt(i915));
 err_unlock:
+        destroy_workqueue(i915->unordered_wq);
+err_wq:
         destroy_workqueue(i915->wq);
 err_drv:
         intel_region_ttm_device_fini(i915);
...