Commit ffd5ce22 authored by Michal Wajdeczko's avatar Michal Wajdeczko Committed by Chris Wilson

drm/i915/guc: Updates for GuC 32.0.3 firmware

New GuC 32.0.3 firmware made many changes around its ABI that
require driver updates:

* FW release version numbering schema now includes patch number
* FW release version encoding in CSS header
* Boot parameters
* Suspend/resume protocol
* Sample-forcewake command
* Additional Data Structures (ADS)

This commit is a squash of patches 3-8 from series [1].
[1] https://patchwork.freedesktop.org/series/58760/

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Anusha Srivatsa <anusha.srivatsa@intel.com>
Cc: Jeff Mcgee <jeff.mcgee@intel.com>
Cc: John Spotswood <john.a.spotswood@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Tomasz Lis <tomasz.lis@intel.com>
Acked-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> # numbering schema
Acked-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> # css header
Acked-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> # boot params
Acked-by: John Spotswood <john.a.spotswood@intel.com> # suspend/resume
Acked-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> # sample-forcewake
Acked-by: John Spotswood <john.a.spotswood@intel.com> # sample-forcewake
Acked-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> # ADS
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-4-michal.wajdeczko@intel.com
parent a2904ade
...@@ -526,6 +526,8 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine); ...@@ -526,6 +526,8 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
struct i915_request * struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine); intel_engine_find_active_request(struct intel_engine_cs *engine);
u32 intel_engine_context_size(struct drm_i915_private *i915, u8 class);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
......
...@@ -156,7 +156,7 @@ static const struct engine_info intel_engines[] = { ...@@ -156,7 +156,7 @@ static const struct engine_info intel_engines[] = {
}; };
/** /**
* ___intel_engine_context_size() - return the size of the context for an engine * intel_engine_context_size() - return the size of the context for an engine
* @dev_priv: i915 device private * @dev_priv: i915 device private
* @class: engine class * @class: engine class
* *
...@@ -169,8 +169,7 @@ static const struct engine_info intel_engines[] = { ...@@ -169,8 +169,7 @@ static const struct engine_info intel_engines[] = {
* in LRC mode, but does not include the "shared data page" used with * in LRC mode, but does not include the "shared data page" used with
* GuC submission. The caller should account for this if using the GuC. * GuC submission. The caller should account for this if using the GuC.
*/ */
static u32 u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{ {
u32 cxt_size; u32 cxt_size;
...@@ -327,8 +326,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv, ...@@ -327,8 +326,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->uabi_class = intel_engine_classes[info->class].uabi_class; engine->uabi_class = intel_engine_classes[info->class].uabi_class;
engine->context_size = __intel_engine_context_size(dev_priv, engine->context_size = intel_engine_context_size(dev_priv,
engine->class); engine->class);
if (WARN_ON(engine->context_size > BIT(20))) if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0; engine->context_size = 0;
if (engine->context_size) if (engine->context_size)
......
...@@ -250,14 +250,7 @@ void intel_guc_fini(struct intel_guc *guc) ...@@ -250,14 +250,7 @@ void intel_guc_fini(struct intel_guc *guc)
static u32 guc_ctl_debug_flags(struct intel_guc *guc) static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{ {
u32 level = intel_guc_log_get_level(&guc->log); u32 level = intel_guc_log_get_level(&guc->log);
u32 flags; u32 flags = 0;
u32 ads;
ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;
if (!GUC_LOG_LEVEL_IS_ENABLED(level))
flags |= GUC_LOG_DEFAULT_DISABLED;
if (!GUC_LOG_LEVEL_IS_VERBOSE(level)) if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
flags |= GUC_LOG_DISABLED; flags |= GUC_LOG_DISABLED;
...@@ -272,11 +265,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc) ...@@ -272,11 +265,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{ {
u32 flags = 0; u32 flags = 0;
flags |= GUC_CTL_VCS2_ENABLED; if (!USES_GUC_SUBMISSION(guc_to_i915(guc)))
if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
flags |= GUC_CTL_KERNEL_SUBMISSIONS;
else
flags |= GUC_CTL_DISABLE_SCHEDULER; flags |= GUC_CTL_DISABLE_SCHEDULER;
return flags; return flags;
...@@ -340,6 +329,14 @@ static u32 guc_ctl_log_params_flags(struct intel_guc *guc) ...@@ -340,6 +329,14 @@ static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
return flags; return flags;
} }
static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
u32 flags = ads << GUC_ADS_ADDR_SHIFT;
return flags;
}
/* /*
* Initialise the GuC parameter block before starting the firmware * Initialise the GuC parameter block before starting the firmware
* transfer. These parameters are read by the firmware on startup * transfer. These parameters are read by the firmware on startup
...@@ -353,20 +350,11 @@ void intel_guc_init_params(struct intel_guc *guc) ...@@ -353,20 +350,11 @@ void intel_guc_init_params(struct intel_guc *guc)
memset(params, 0, sizeof(params)); memset(params, 0, sizeof(params));
/* params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
* GuC ARAT increment is 10 ns. GuC default scheduler quantum is one params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
* second. This ARAR is calculated by:
* Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
*/
params[GUC_CTL_ARAT_HIGH] = 0;
params[GUC_CTL_ARAT_LOW] = 100000000;
params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc); params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc); params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]); DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
...@@ -550,25 +538,33 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset) ...@@ -550,25 +538,33 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
return intel_guc_send(guc, action, ARRAY_SIZE(action)); return intel_guc_send(guc, action, ARRAY_SIZE(action));
} }
/* /**
* The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and * intel_guc_suspend() - notify GuC entering suspend state
* then return, so waiting on the H2G is not enough to guarantee GuC is done. * @guc: the guc
* When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to
* scratch register 14, so we can poll on that. Note that GuC does not ensure
* that the value in the register is different from
* INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to
* take care of that ourselves as well.
*/ */
static int guc_sleep_state_action(struct intel_guc *guc, int intel_guc_suspend(struct intel_guc *guc)
const u32 *action, u32 len)
{ {
struct drm_i915_private *dev_priv = guc_to_i915(guc); struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret; int ret;
u32 status; u32 status;
u32 action[] = {
INTEL_GUC_ACTION_ENTER_S_STATE,
GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
};
/*
* The ENTER_S_STATE action queues the save/restore operation in GuC FW
* and then returns, so waiting on the H2G is not enough to guarantee
* GuC is done. When all the processing is done, GuC writes
* INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
* on that. Note that GuC does not ensure that the value in the register
* is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
* in progress so we need to take care of that ourselves as well.
*/
I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK); I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
ret = intel_guc_send(guc, action, len); ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
if (ret) if (ret)
return ret; return ret;
...@@ -588,21 +584,6 @@ static int guc_sleep_state_action(struct intel_guc *guc, ...@@ -588,21 +584,6 @@ static int guc_sleep_state_action(struct intel_guc *guc,
return 0; return 0;
} }
/**
* intel_guc_suspend() - notify GuC entering suspend state
* @guc: the guc
*/
int intel_guc_suspend(struct intel_guc *guc)
{
u32 data[] = {
INTEL_GUC_ACTION_ENTER_S_STATE,
GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
intel_guc_ggtt_offset(guc, guc->shared_data)
};
return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}
/** /**
* intel_guc_reset_engine() - ask GuC to reset an engine * intel_guc_reset_engine() - ask GuC to reset an engine
* @guc: intel_guc structure * @guc: intel_guc structure
...@@ -632,13 +613,12 @@ int intel_guc_reset_engine(struct intel_guc *guc, ...@@ -632,13 +613,12 @@ int intel_guc_reset_engine(struct intel_guc *guc,
*/ */
int intel_guc_resume(struct intel_guc *guc) int intel_guc_resume(struct intel_guc *guc)
{ {
u32 data[] = { u32 action[] = {
INTEL_GUC_ACTION_EXIT_S_STATE, INTEL_GUC_ACTION_EXIT_S_STATE,
GUC_POWER_D0, GUC_POWER_D0,
intel_guc_ggtt_offset(guc, guc->shared_data)
}; };
return guc_sleep_state_action(guc, data, ARRAY_SIZE(data)); return intel_guc_send(guc, action, ARRAY_SIZE(action));
} }
/** /**
......
...@@ -51,7 +51,7 @@ static void guc_policies_init(struct guc_policies *policies) ...@@ -51,7 +51,7 @@ static void guc_policies_init(struct guc_policies *policies)
policies->max_num_work_items = POLICY_MAX_NUM_WI; policies->max_num_work_items = POLICY_MAX_NUM_WI;
for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) { for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) { for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++) {
policy = &policies->policy[p][i]; policy = &policies->policy[p][i];
guc_policy_init(policy); guc_policy_init(policy);
...@@ -61,6 +61,11 @@ static void guc_policies_init(struct guc_policies *policies) ...@@ -61,6 +61,11 @@ static void guc_policies_init(struct guc_policies *policies)
policies->is_valid = 1; policies->is_valid = 1;
} }
static void guc_ct_pool_entries_init(struct guc_ct_pool_entry *pool, u32 num)
{
memset(pool, 0, num * sizeof(*pool));
}
/* /*
* The first 80 dwords of the register state context, containing the * The first 80 dwords of the register state context, containing the
* execlists and ppgtt registers. * execlists and ppgtt registers.
...@@ -75,20 +80,21 @@ static void guc_policies_init(struct guc_policies *policies) ...@@ -75,20 +80,21 @@ static void guc_policies_init(struct guc_policies *policies)
int intel_guc_ads_create(struct intel_guc *guc) int intel_guc_ads_create(struct intel_guc *guc)
{ {
struct drm_i915_private *dev_priv = guc_to_i915(guc); struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct i915_vma *vma, *kernel_ctx_vma; struct i915_vma *vma;
struct page *page;
/* The ads obj includes the struct itself and buffers passed to GuC */ /* The ads obj includes the struct itself and buffers passed to GuC */
struct { struct {
struct guc_ads ads; struct guc_ads ads;
struct guc_policies policies; struct guc_policies policies;
struct guc_mmio_reg_state reg_state; struct guc_mmio_reg_state reg_state;
struct guc_gt_system_info system_info;
struct guc_clients_info clients_info;
struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE];
u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE]; u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
} __packed *blob; } __packed *blob;
struct intel_engine_cs *engine;
enum intel_engine_id id;
const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base; u32 base;
u8 engine_class;
int ret;
GEM_BUG_ON(guc->ads_vma); GEM_BUG_ON(guc->ads_vma);
...@@ -98,51 +104,68 @@ int intel_guc_ads_create(struct intel_guc *guc) ...@@ -98,51 +104,68 @@ int intel_guc_ads_create(struct intel_guc *guc)
guc->ads_vma = vma; guc->ads_vma = vma;
page = i915_vma_first_page(vma); blob = i915_gem_object_pin_map(guc->ads_vma->obj, I915_MAP_WB);
blob = kmap(page); if (IS_ERR(blob)) {
ret = PTR_ERR(blob);
goto err_vma;
}
/* GuC scheduling policies */ /* GuC scheduling policies */
guc_policies_init(&blob->policies); guc_policies_init(&blob->policies);
/* MMIO reg state */
for_each_engine(engine, dev_priv, id) {
blob->reg_state.white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
blob->reg_state.white_list[engine->guc_id].count = 0;
}
/* /*
* The GuC requires a "Golden Context" when it reinitialises * GuC expects a per-engine-class context image and size
* engines after a reset. Here we use the Render ring default * (minus hwsp and ring context). The context image will be
* context, which must already exist and be pinned in the GGTT, * used to reinitialize engines after a reset. It must exist
* so its address won't change after we've told the GuC where * and be pinned in the GGTT, so that the address won't change after
* to find it. Note that we have to skip our header (1 page), * we have told GuC where to find it. The context size will be used
* because our GuC shared data is there. * to validate that the LRC base + size fall within allowed GGTT.
*/ */
kernel_ctx_vma = dev_priv->engine[RCS0]->kernel_context->state; for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
blob->ads.golden_context_lrca = if (engine_class == OTHER_CLASS)
intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset; continue;
/*
* TODO: Set context pointer to default state to allow
* GuC to re-init guilty contexts after internal reset.
*/
blob->ads.golden_context_lrca[engine_class] = 0;
blob->ads.eng_state_size[engine_class] =
intel_engine_context_size(dev_priv, engine_class) -
skipped_size;
}
/* /* System info */
* The GuC expects us to exclude the portion of the context image that blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
* it skips from the size it is to read. It starts reading from after blob->system_info.rcs_enabled = 1;
* the execlist context (so skipping the first page [PPHWSP] and 80 blob->system_info.bcs_enabled = 1;
* dwords). Weird guc is weird.
*/ blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv);
for_each_engine(engine, dev_priv, id) blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv);
blob->ads.eng_state_size[engine->guc_id] = blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
engine->context_size - skipped_size;
base = intel_guc_ggtt_offset(guc, vma); base = intel_guc_ggtt_offset(guc, vma);
/* Clients info */
guc_ct_pool_entries_init(blob->ct_pool, ARRAY_SIZE(blob->ct_pool));
blob->clients_info.clients_num = 1;
blob->clients_info.ct_pool_addr = base + ptr_offset(blob, ct_pool);
blob->clients_info.ct_pool_count = ARRAY_SIZE(blob->ct_pool);
/* ADS */
blob->ads.scheduler_policies = base + ptr_offset(blob, policies); blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer); blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state); blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
blob->ads.clients_info = base + ptr_offset(blob, clients_info);
kunmap(page); i915_gem_object_unpin_map(guc->ads_vma->obj);
return 0; return 0;
err_vma:
i915_vma_unpin_and_release(&guc->ads_vma, 0);
return ret;
} }
void intel_guc_ads_destroy(struct intel_guc *guc) void intel_guc_ads_destroy(struct intel_guc *guc)
......
...@@ -30,53 +30,60 @@ ...@@ -30,53 +30,60 @@
#include "intel_guc_fw.h" #include "intel_guc_fw.h"
#include "i915_drv.h" #include "i915_drv.h"
#define SKL_FW_MAJOR 9 #define __MAKE_GUC_FW_PATH(KEY) \
#define SKL_FW_MINOR 33 "i915/" \
__stringify(KEY##_GUC_FW_PREFIX) "_guc_" \
#define BXT_FW_MAJOR 9 __stringify(KEY##_GUC_FW_MAJOR) "." \
#define BXT_FW_MINOR 29 __stringify(KEY##_GUC_FW_MINOR) "." \
__stringify(KEY##_GUC_FW_PATCH) ".bin"
#define KBL_FW_MAJOR 9
#define KBL_FW_MINOR 39 #define SKL_GUC_FW_PREFIX skl
#define SKL_GUC_FW_MAJOR 32
#define GUC_FW_PATH(platform, major, minor) \ #define SKL_GUC_FW_MINOR 0
"i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin" #define SKL_GUC_FW_PATCH 3
#define SKL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(SKL)
#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR) MODULE_FIRMWARE(SKL_GUC_FIRMWARE_PATH);
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
#define BXT_GUC_FW_PREFIX bxt
#define I915_BXT_GUC_UCODE GUC_FW_PATH(bxt, BXT_FW_MAJOR, BXT_FW_MINOR) #define BXT_GUC_FW_MAJOR 32
MODULE_FIRMWARE(I915_BXT_GUC_UCODE); #define BXT_GUC_FW_MINOR 0
#define BXT_GUC_FW_PATCH 3
#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR) #define BXT_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(BXT)
MODULE_FIRMWARE(I915_KBL_GUC_UCODE); MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH);
#define KBL_GUC_FW_PREFIX kbl
#define KBL_GUC_FW_MAJOR 32
#define KBL_GUC_FW_MINOR 0
#define KBL_GUC_FW_PATCH 3
#define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL)
MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH);
static void guc_fw_select(struct intel_uc_fw *guc_fw) static void guc_fw_select(struct intel_uc_fw *guc_fw)
{ {
struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
struct drm_i915_private *dev_priv = guc_to_i915(guc); struct drm_i915_private *i915 = guc_to_i915(guc);
GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
if (!HAS_GUC(dev_priv)) if (!HAS_GUC(i915))
return; return;
if (i915_modparams.guc_firmware_path) { if (i915_modparams.guc_firmware_path) {
guc_fw->path = i915_modparams.guc_firmware_path; guc_fw->path = i915_modparams.guc_firmware_path;
guc_fw->major_ver_wanted = 0; guc_fw->major_ver_wanted = 0;
guc_fw->minor_ver_wanted = 0; guc_fw->minor_ver_wanted = 0;
} else if (IS_SKYLAKE(dev_priv)) { } else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
guc_fw->path = I915_SKL_GUC_UCODE; guc_fw->path = KBL_GUC_FIRMWARE_PATH;
guc_fw->major_ver_wanted = SKL_FW_MAJOR; guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR;
guc_fw->minor_ver_wanted = SKL_FW_MINOR; guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) { } else if (IS_BROXTON(i915)) {
guc_fw->path = I915_BXT_GUC_UCODE; guc_fw->path = BXT_GUC_FIRMWARE_PATH;
guc_fw->major_ver_wanted = BXT_FW_MAJOR; guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR;
guc_fw->minor_ver_wanted = BXT_FW_MINOR; guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { } else if (IS_SKYLAKE(i915)) {
guc_fw->path = I915_KBL_GUC_UCODE; guc_fw->path = SKL_GUC_FIRMWARE_PATH;
guc_fw->major_ver_wanted = KBL_FW_MAJOR; guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR;
guc_fw->minor_ver_wanted = KBL_FW_MINOR; guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR;
} }
} }
......
...@@ -39,6 +39,14 @@ ...@@ -39,6 +39,14 @@
#define GUC_VIDEO_ENGINE2 4 #define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) #define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
/*
* XXX: Beware that Gen9 firmware 32.x uses wrong definition for
* GUC_MAX_INSTANCES_PER_CLASS (1) but this is harmless for us now
* as we are not enabling GuC submission mode where this will be used
*/
#define GUC_MAX_ENGINE_CLASSES 5
#define GUC_MAX_INSTANCES_PER_CLASS 4
#define GUC_DOORBELL_INVALID 256 #define GUC_DOORBELL_INVALID 256
#define GUC_DB_SIZE (PAGE_SIZE) #define GUC_DB_SIZE (PAGE_SIZE)
...@@ -73,44 +81,28 @@ ...@@ -73,44 +81,28 @@
#define GUC_STAGE_DESC_ATTR_PCH BIT(6) #define GUC_STAGE_DESC_ATTR_PCH BIT(6)
#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7) #define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7)
/* The guc control data is 10 DWORDs */ /* New GuC control data */
#define GUC_CTL_CTXINFO 0 #define GUC_CTL_CTXINFO 0
#define GUC_CTL_CTXNUM_IN16_SHIFT 0 #define GUC_CTL_CTXNUM_IN16_SHIFT 0
#define GUC_CTL_BASE_ADDR_SHIFT 12 #define GUC_CTL_BASE_ADDR_SHIFT 12
#define GUC_CTL_ARAT_HIGH 1 #define GUC_CTL_LOG_PARAMS 1
#define GUC_CTL_ARAT_LOW 2
#define GUC_CTL_DEVICE_INFO 3
#define GUC_CTL_LOG_PARAMS 4
#define GUC_LOG_VALID (1 << 0) #define GUC_LOG_VALID (1 << 0)
#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1) #define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3) #define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
#define GUC_LOG_CRASH_SHIFT 4 #define GUC_LOG_CRASH_SHIFT 4
#define GUC_LOG_CRASH_MASK (0x1 << GUC_LOG_CRASH_SHIFT) #define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT)
#define GUC_LOG_DPC_SHIFT 6 #define GUC_LOG_DPC_SHIFT 6
#define GUC_LOG_DPC_MASK (0x7 << GUC_LOG_DPC_SHIFT) #define GUC_LOG_DPC_MASK (0x7 << GUC_LOG_DPC_SHIFT)
#define GUC_LOG_ISR_SHIFT 9 #define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_ISR_MASK (0x7 << GUC_LOG_ISR_SHIFT) #define GUC_LOG_ISR_MASK (0x7 << GUC_LOG_ISR_SHIFT)
#define GUC_LOG_BUF_ADDR_SHIFT 12 #define GUC_LOG_BUF_ADDR_SHIFT 12
#define GUC_CTL_PAGE_FAULT_CONTROL 5 #define GUC_CTL_WA 2
#define GUC_CTL_FEATURE 3
#define GUC_CTL_DISABLE_SCHEDULER (1 << 14)
#define GUC_CTL_WA 6 #define GUC_CTL_DEBUG 4
#define GUC_CTL_WA_UK_BY_DRIVER (1 << 3)
#define GUC_CTL_FEATURE 7
#define GUC_CTL_VCS2_ENABLED (1 << 0)
#define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1)
#define GUC_CTL_FEATURE2 (1 << 2)
#define GUC_CTL_POWER_GATING (1 << 3)
#define GUC_CTL_DISABLE_SCHEDULER (1 << 4)
#define GUC_CTL_PREEMPTION_LOG (1 << 5)
#define GUC_CTL_ENABLE_SLPC (1 << 7)
#define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8)
#define GUC_CTL_DEBUG 8
#define GUC_LOG_VERBOSITY_SHIFT 0 #define GUC_LOG_VERBOSITY_SHIFT 0
#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT) #define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT) #define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT)
...@@ -123,13 +115,10 @@ ...@@ -123,13 +115,10 @@
#define GUC_LOG_DESTINATION_MASK (3 << 4) #define GUC_LOG_DESTINATION_MASK (3 << 4)
#define GUC_LOG_DISABLED (1 << 6) #define GUC_LOG_DISABLED (1 << 6)
#define GUC_PROFILE_ENABLED (1 << 7) #define GUC_PROFILE_ENABLED (1 << 7)
#define GUC_WQ_TRACK_ENABLED (1 << 8)
#define GUC_ADS_ENABLED (1 << 9)
#define GUC_LOG_DEFAULT_DISABLED (1 << 10)
#define GUC_ADS_ADDR_SHIFT 11
#define GUC_ADS_ADDR_MASK 0xfffff800
#define GUC_CTL_RSRVD 9 #define GUC_CTL_ADS 5
#define GUC_ADS_ADDR_SHIFT 1
#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT)
#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */ #define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
...@@ -168,11 +157,7 @@ ...@@ -168,11 +157,7 @@
* in fw. So driver will load a truncated firmware in this case. * in fw. So driver will load a truncated firmware in this case.
* *
* HuC firmware layout is same as GuC firmware. * HuC firmware layout is same as GuC firmware.
* * Only HuC version information is saved in a different way.
* HuC firmware css header is different. However, the only difference is where
* the version information is saved. The uc_css_header is unified to support
* both. Driver should get HuC version from uc_css_header.huc_sw_version, while
* uc_css_header.guc_sw_version for GuC.
*/ */
struct uc_css_header { struct uc_css_header {
...@@ -183,41 +168,27 @@ struct uc_css_header { ...@@ -183,41 +168,27 @@ struct uc_css_header {
u32 header_version; u32 header_version;
u32 module_id; u32 module_id;
u32 module_vendor; u32 module_vendor;
union { u32 date;
struct { #define CSS_DATE_DAY (0xFF << 0)
u8 day; #define CSS_DATE_MONTH (0xFF << 8)
u8 month; #define CSS_DATE_YEAR (0xFFFF << 16)
u16 year;
};
u32 date;
};
u32 size_dw; /* uCode plus header_size_dw */ u32 size_dw; /* uCode plus header_size_dw */
u32 key_size_dw; u32 key_size_dw;
u32 modulus_size_dw; u32 modulus_size_dw;
u32 exponent_size_dw; u32 exponent_size_dw;
union { u32 time;
struct { #define CSS_TIME_HOUR (0xFF << 0)
u8 hour; #define CSS_DATE_MIN (0xFF << 8)
u8 min; #define CSS_DATE_SEC (0xFFFF << 16)
u16 sec;
};
u32 time;
};
char username[8]; char username[8];
char buildnumber[12]; char buildnumber[12];
union { u32 sw_version;
struct { #define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16)
u32 branch_client_version; #define CSS_SW_VERSION_GUC_MINOR (0xFF << 8)
u32 sw_version; #define CSS_SW_VERSION_GUC_PATCH (0xFF << 0)
} guc; #define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16)
struct { #define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0)
u32 sw_version; u32 reserved[14];
u32 reserved;
} huc;
};
u32 prod_preprod_fw;
u32 reserved[12];
u32 header_info; u32 header_info;
} __packed; } __packed;
...@@ -423,23 +394,19 @@ struct guc_ct_buffer_desc { ...@@ -423,23 +394,19 @@ struct guc_ct_buffer_desc {
struct guc_policy { struct guc_policy {
/* Time for one workload to execute. (in micro seconds) */ /* Time for one workload to execute. (in micro seconds) */
u32 execution_quantum; u32 execution_quantum;
u32 reserved1;
/* Time to wait for a preemption request to completed before issuing a /* Time to wait for a preemption request to completed before issuing a
* reset. (in micro seconds). */ * reset. (in micro seconds). */
u32 preemption_time; u32 preemption_time;
/* How much time to allow to run after the first fault is observed. /* How much time to allow to run after the first fault is observed.
* Then preempt afterwards. (in micro seconds) */ * Then preempt afterwards. (in micro seconds) */
u32 fault_time; u32 fault_time;
u32 policy_flags; u32 policy_flags;
u32 reserved[2]; u32 reserved[8];
} __packed; } __packed;
struct guc_policies { struct guc_policies {
struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINES_NUM]; struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINE_CLASSES];
u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
/* In micro seconds. How much time to allow before DPC processing is /* In micro seconds. How much time to allow before DPC processing is
* called back via interrupt (to prevent DPC queue drain starving). * called back via interrupt (to prevent DPC queue drain starving).
* Typically 1000s of micro seconds (example only, not granularity). */ * Typically 1000s of micro seconds (example only, not granularity). */
...@@ -452,57 +419,73 @@ struct guc_policies { ...@@ -452,57 +419,73 @@ struct guc_policies {
* idle. */ * idle. */
u32 max_num_work_items; u32 max_num_work_items;
u32 reserved[19]; u32 reserved[4];
} __packed; } __packed;
/* GuC MMIO reg state struct */ /* GuC MMIO reg state struct */
#define GUC_REGSET_FLAGS_NONE 0x0
#define GUC_REGSET_POWERCYCLE 0x1
#define GUC_REGSET_MASKED 0x2
#define GUC_REGSET_ENGINERESET 0x4
#define GUC_REGSET_SAVE_DEFAULT_VALUE 0x8
#define GUC_REGSET_SAVE_CURRENT_VALUE 0x10
#define GUC_REGSET_MAX_REGISTERS 25 #define GUC_REGSET_MAX_REGISTERS 64
#define GUC_MMIO_WHITE_LIST_START 0x24d0
#define GUC_MMIO_WHITE_LIST_MAX 12
#define GUC_S3_SAVE_SPACE_PAGES 10 #define GUC_S3_SAVE_SPACE_PAGES 10
struct guc_mmio_regset { struct guc_mmio_reg {
struct __packed { u32 offset;
u32 offset; u32 value;
u32 value; u32 flags;
u32 flags; #define GUC_REGSET_MASKED (1 << 0)
} registers[GUC_REGSET_MAX_REGISTERS]; } __packed;
struct guc_mmio_regset {
struct guc_mmio_reg registers[GUC_REGSET_MAX_REGISTERS];
u32 values_valid; u32 values_valid;
u32 number_of_registers; u32 number_of_registers;
} __packed; } __packed;
/* MMIO registers that are set as non privileged */ /* GuC register sets */
struct mmio_white_list { struct guc_mmio_reg_state {
u32 mmio_start; struct guc_mmio_regset engine_reg[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
u32 offsets[GUC_MMIO_WHITE_LIST_MAX]; u32 reserved[98];
u32 count;
} __packed; } __packed;
struct guc_mmio_reg_state { /* HW info */
struct guc_mmio_regset global_reg; struct guc_gt_system_info {
struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM]; u32 slice_enabled;
struct mmio_white_list white_list[GUC_MAX_ENGINES_NUM]; u32 rcs_enabled;
u32 reserved0;
u32 bcs_enabled;
u32 vdbox_enable_mask;
u32 vdbox_sfc_support_mask;
u32 vebox_enable_mask;
u32 reserved[9];
} __packed; } __packed;
/* GuC Additional Data Struct */ /* Clients info */
struct guc_ct_pool_entry {
struct guc_ct_buffer_desc desc;
u32 reserved[7];
} __packed;
#define GUC_CT_POOL_SIZE 2
struct guc_clients_info {
u32 clients_num;
u32 reserved0[13];
u32 ct_pool_addr;
u32 ct_pool_count;
u32 reserved[4];
} __packed;
/* GuC Additional Data Struct */
struct guc_ads { struct guc_ads {
u32 reg_state_addr; u32 reg_state_addr;
u32 reg_state_buffer; u32 reg_state_buffer;
u32 golden_context_lrca;
u32 scheduler_policies; u32 scheduler_policies;
u32 reserved0[3]; u32 gt_system_info;
u32 eng_state_size[GUC_MAX_ENGINES_NUM]; u32 clients_info;
u32 reserved2[4]; u32 control_data;
u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
u32 reserved[16];
} __packed; } __packed;
/* GuC logging structures */ /* GuC logging structures */
...@@ -646,7 +629,6 @@ enum intel_guc_action { ...@@ -646,7 +629,6 @@ enum intel_guc_action {
INTEL_GUC_ACTION_DEFAULT = 0x0, INTEL_GUC_ACTION_DEFAULT = 0x0,
INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2, INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3, INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3,
INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10, INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
...@@ -654,6 +636,7 @@ enum intel_guc_action { ...@@ -654,6 +636,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_ENTER_S_STATE = 0x501, INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
INTEL_GUC_ACTION_EXIT_S_STATE = 0x502, INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003, INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x3005,
INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
...@@ -674,9 +657,9 @@ enum intel_guc_report_status { ...@@ -674,9 +657,9 @@ enum intel_guc_report_status {
}; };
enum intel_guc_sleep_state_status { enum intel_guc_sleep_state_status {
INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0, INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1,
INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1, INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2,
INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2 INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3
#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000 #define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
}; };
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
* *
*/ */
#include <linux/bitfield.h>
#include <linux/firmware.h> #include <linux/firmware.h>
#include <drm/drm_print.h> #include <drm/drm_print.h>
...@@ -119,21 +120,20 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, ...@@ -119,21 +120,20 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
goto fail; goto fail;
} }
/* /* Get version numbers from the CSS header */
* The GuC firmware image has the version number embedded at a
* well-known offset within the firmware blob; note that major / minor
* version are TWO bytes each (i.e. u16), although all pointers and
* offsets are defined in terms of bytes (u8).
*/
switch (uc_fw->type) { switch (uc_fw->type) {
case INTEL_UC_FW_TYPE_GUC: case INTEL_UC_FW_TYPE_GUC:
uc_fw->major_ver_found = css->guc.sw_version >> 16; uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR,
uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF; css->sw_version);
uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR,
css->sw_version);
break; break;
case INTEL_UC_FW_TYPE_HUC: case INTEL_UC_FW_TYPE_HUC:
uc_fw->major_ver_found = css->huc.sw_version >> 16; uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR,
uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF; css->sw_version);
uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR,
css->sw_version);
break; break;
default: default:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment