Commit 8d5682f6 authored by Daniele Ceraolo Spurio, committed by Chris Wilson

drm/i915/uc: Unify uC firmware upload

The way we load the firmwares is the same for both GuC and HuC; the only
differences are the wopcm destination address and the dma flags, so we
can easily move the logic to a common function and pass in the offset
and flags. The only other differences in the upload path are the extra
steps that GuC performs before and after the xfer, but those don't
require the GuC fw to be pinned in ggtt and can safely be performed
before calling intel_uc_fw_upload().
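
For illustration, the per-firmware difference on the caller side now
reduces to the offset and flags passed to the common function (taken
from the callers in the diff below):

	/* GuC: current uCode expects the blob at 8k inside WOPCM */
	ret = intel_uc_fw_upload(&guc->fw, gt, 0x2000, UOS_MOVE);

	/* HuC: HW doesn't look at the destination address, so pass 0 */
	return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), 0, HUC_UKERNEL);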

Note that this patch re-introduces the dma xfer wait for guc loading that
was removed with "drm/i915/guc: Propagate the fw xfer timeout". This is
not going to slow us down on a successful load (the dma has to complete
before fw init can start), but could slightly increase the timeout in case
of a fw init error.

v2: use _fw variants for uncore accesses (Chris), fix guc_fw status on
    failed wait.

v3: use dev_err and print DMA_CTRL (Chris)
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190725001813.4740-9-daniele.ceraolospurio@intel.com
parent 4ca8d2ef
@@ -84,13 +84,6 @@ static void guc_xfer_rsa(struct intel_uc_fw *guc_fw,
 		intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]);
 }
 
-static bool guc_xfer_completed(struct intel_uncore *uncore, u32 *status)
-{
-	/* Did we complete the xfer? */
-	*status = intel_uncore_read(uncore, DMA_CTRL);
-	return !(*status & START_DMA);
-}
-
 /*
  * Read the GuC status register (GUC_STATUS) and store it in the
  * specified location; then return a boolean indicating whether
@@ -137,65 +130,27 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
 		ret = -ENXIO;
 	}
 
-	if (ret == 0 && !guc_xfer_completed(uncore, &status)) {
-		DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
-			  status);
-		ret = -ENXIO;
-	}
-
 	return ret;
 }
 
-/*
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Architecturally, the DMA engine is bidirectional, and can potentially even
- * transfer between GTT locations. This functionality is left out of the API
- * for now as there is no need for it.
- */
-static int guc_xfer_ucode(struct intel_uc_fw *guc_fw,
-			  struct intel_gt *gt)
-{
-	struct intel_uncore *uncore = gt->uncore;
-	unsigned long offset;
-
-	/*
-	 * The header plus uCode will be copied to WOPCM via DMA, excluding any
-	 * other components
-	 */
-	intel_uncore_write(uncore, DMA_COPY_SIZE,
-			   guc_fw->header_size + guc_fw->ucode_size);
-
-	/* Set the source address for the new blob */
-	offset = intel_uc_fw_ggtt_offset(guc_fw, gt->ggtt) + guc_fw->header_offset;
-	intel_uncore_write(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
-	intel_uncore_write(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
-
-	/*
-	 * Set the DMA destination. Current uCode expects the code to be
-	 * loaded at 8k; locations below this are used for the stack.
-	 */
-	intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0x2000);
-	intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
-	/* Finally start the DMA */
-	intel_uncore_write(uncore, DMA_CTRL,
-			   _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
-
-	return guc_wait_ucode(uncore);
-}
-
-/*
- * Load the GuC firmware blob into the MinuteIA.
- */
-static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct intel_gt *gt)
+/**
+ * intel_guc_fw_upload() - load GuC uCode to device
+ * @guc: intel_guc structure
+ *
+ * Called from intel_uc_init_hw() during driver load, resume from sleep and
+ * after a GPU reset.
+ *
+ * The firmware image should have already been fetched into memory, so only
+ * check that fetch succeeded, and then transfer the image to the h/w.
+ *
+ * Return: non-zero code on error
+ */
+int intel_guc_fw_upload(struct intel_guc *guc)
 {
+	struct intel_gt *gt = guc_to_gt(guc);
 	struct intel_uncore *uncore = gt->uncore;
 	int ret;
 
-	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
-
-	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
 	guc_prepare_xfer(uncore);
 
 	/*
@@ -203,32 +158,24 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct intel_gt *gt)
 	 * by the DMA engine in one operation, whereas the RSA signature is
 	 * loaded via MMIO.
 	 */
-	guc_xfer_rsa(guc_fw, uncore);
+	guc_xfer_rsa(&guc->fw, uncore);
 
-	ret = guc_xfer_ucode(guc_fw, gt);
+	/*
+	 * Current uCode expects the code to be loaded at 8k; locations below
+	 * this are used for the stack.
+	 */
+	ret = intel_uc_fw_upload(&guc->fw, gt, 0x2000, UOS_MOVE);
+	if (ret)
+		goto out;
 
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+	ret = guc_wait_ucode(uncore);
+	if (ret)
+		goto out;
 
-	return ret;
-}
-
-/**
- * intel_guc_fw_upload() - load GuC uCode to device
- * @guc: intel_guc structure
- *
- * Called from intel_uc_init_hw() during driver load, resume from sleep and
- * after a GPU reset.
- *
- * The firmware image should have already been fetched into memory, so only
- * check that fetch succeeded, and then transfer the image to the h/w.
- *
- * Return: non-zero code on error
- */
-int intel_guc_fw_upload(struct intel_guc *guc)
-{
-	int ret = intel_uc_fw_upload(&guc->fw, guc_to_gt(guc), guc_fw_xfer);
-	if (!ret)
-		guc->fw.status = INTEL_UC_FIRMWARE_RUNNING;
+	guc->fw.status = INTEL_UC_FIRMWARE_RUNNING;
+	return 0;
 
+out:
+	guc->fw.status = INTEL_UC_FIRMWARE_FAIL;
 	return ret;
 }
@@ -34,60 +34,6 @@ void intel_huc_fw_init_early(struct intel_huc *huc)
 	intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, huc_to_gt(huc)->i915);
 }
 
-/**
- * huc_fw_xfer() - DMA's the firmware
- * @huc_fw: the firmware descriptor
- *
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Return: 0 on success, non-zero on failure
- */
-static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct intel_gt *gt)
-{
-	struct intel_uncore *uncore = gt->uncore;
-	unsigned long offset = 0;
-	u32 size;
-	int ret;
-
-	GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
-
-	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
-	/* Set the source address for the uCode */
-	offset = intel_uc_fw_ggtt_offset(huc_fw, gt->ggtt) +
-		 huc_fw->header_offset;
-	intel_uncore_write(uncore, DMA_ADDR_0_LOW,
-			   lower_32_bits(offset));
-	intel_uncore_write(uncore, DMA_ADDR_0_HIGH,
-			   upper_32_bits(offset) & 0xFFFF);
-
-	/*
-	 * Hardware doesn't look at destination address for HuC. Set it to 0,
-	 * but still program the correct address space.
-	 */
-	intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0);
-	intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
-	size = huc_fw->header_size + huc_fw->ucode_size;
-	intel_uncore_write(uncore, DMA_COPY_SIZE, size);
-
-	/* Start the DMA */
-	intel_uncore_write(uncore, DMA_CTRL,
-			   _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
-
-	/* Wait for DMA to finish */
-	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
-
-	DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
-
-	/* Disable the bits once DMA is over */
-	intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
-
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
-	return ret;
-}
-
 /**
  * intel_huc_fw_upload() - load HuC uCode to device
  * @huc: intel_huc structure
@@ -102,5 +48,6 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct intel_gt *gt)
  */
 int intel_huc_fw_upload(struct intel_huc *huc)
 {
-	return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), huc_fw_xfer);
+	/* HW doesn't look at destination address for HuC, so set it to 0 */
+	return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), 0, HUC_UKERNEL);
 }
@@ -321,13 +321,24 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 	release_firmware(fw);		/* OK even if fw is NULL */
 }
 
+static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
+{
+	struct drm_mm_node *node = &ggtt->uc_fw;
+
+	GEM_BUG_ON(!node->allocated);
+	GEM_BUG_ON(upper_32_bits(node->start));
+	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
+
+	return lower_32_bits(node->start);
+}
+
 static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
 				  struct intel_gt *gt)
 {
 	struct drm_i915_gem_object *obj = uc_fw->obj;
 	struct i915_ggtt *ggtt = gt->ggtt;
 	struct i915_vma dummy = {
-		.node.start = intel_uc_fw_ggtt_offset(uc_fw, ggtt),
+		.node.start = uc_fw_ggtt_offset(uc_fw, ggtt),
 		.node.size = obj->base.size,
 		.pages = obj->mm.pages,
 		.vm = &ggtt->vm,
@@ -347,23 +358,69 @@ static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw,
 {
 	struct drm_i915_gem_object *obj = uc_fw->obj;
 	struct i915_ggtt *ggtt = gt->ggtt;
-	u64 start = intel_uc_fw_ggtt_offset(uc_fw, ggtt);
+	u64 start = uc_fw_ggtt_offset(uc_fw, ggtt);
 
 	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
 }
 
+static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
+		      u32 wopcm_offset, u32 dma_flags)
+{
+	struct intel_uncore *uncore = gt->uncore;
+	u64 offset;
+	int ret;
+
+	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+	/* Set the source address for the uCode */
+	offset = uc_fw_ggtt_offset(uc_fw, gt->ggtt) + uc_fw->header_offset;
+	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
+	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
+	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));
+
+	/* Set the DMA destination */
+	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, wopcm_offset);
+	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+	/*
+	 * Set the transfer size. The header plus uCode will be copied to WOPCM
+	 * via DMA, excluding any other components
+	 */
+	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
+			      uc_fw->header_size + uc_fw->ucode_size);
+
+	/* Start the DMA */
+	intel_uncore_write_fw(uncore, DMA_CTRL,
+			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));
+
+	/* Wait for DMA to finish */
+	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
+	if (ret)
+		dev_err(gt->i915->drm.dev, "DMA for %s fw failed, DMA_CTRL=%u\n",
+			intel_uc_fw_type_repr(uc_fw->type),
+			intel_uncore_read_fw(uncore, DMA_CTRL));
+
+	/* Disable the bits once DMA is over */
+	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+
+	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+	return ret;
+}
+
 /**
  * intel_uc_fw_upload - load uC firmware using custom loader
  * @uc_fw: uC firmware
  * @gt: the intel_gt structure
- * @xfer: custom uC firmware loader function
+ * @wopcm_offset: destination offset in wopcm
+ * @dma_flags: flags for flags for dma ctrl
  *
- * Loads uC firmware using custom loader and updates internal flags.
+ * Loads uC firmware and updates internal flags.
  *
  * Return: 0 on success, non-zero on failure.
  */
 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
-		       int (*xfer)(struct intel_uc_fw *uc_fw, struct intel_gt *gt))
+		       u32 wopcm_offset, u32 dma_flags)
 {
 	int err;
@@ -377,7 +434,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
 		return -ENOEXEC;
 
 	/* Call custom loader */
 	intel_uc_fw_ggtt_bind(uc_fw, gt);
-	err = xfer(uc_fw, gt);
+	err = uc_fw_xfer(uc_fw, gt, wopcm_offset, dma_flags);
 	intel_uc_fw_ggtt_unbind(uc_fw, gt);
 	if (err)
 		goto fail;
@@ -430,17 +487,6 @@ void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
 	i915_gem_object_unpin_pages(uc_fw->obj);
 }
 
-u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
-{
-	struct drm_mm_node *node = &ggtt->uc_fw;
-
-	GEM_BUG_ON(!node->allocated);
-	GEM_BUG_ON(upper_32_bits(node->start));
-	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
-
-	return lower_32_bits(node->start);
-}
-
 /**
  * intel_uc_fw_cleanup_fetch - cleanup uC firmware
  *
...
@@ -31,7 +31,6 @@
 struct drm_printer;
 struct drm_i915_private;
 struct intel_gt;
-struct i915_ggtt;
 
 /* Home of GuC, HuC and DMC firmwares */
 #define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
@@ -174,10 +173,9 @@ void intel_uc_fw_fetch(struct drm_i915_private *i915,
 		       struct intel_uc_fw *uc_fw);
 void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
-		       int (*xfer)(struct intel_uc_fw *uc_fw, struct intel_gt *gt));
+		       u32 wopcm_offset, u32 dma_flags);
 int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
-u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt);
 size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len);
 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
...