Commit c1156cce authored by Mark Brown

ASoC: Intel: AVS - Audio DSP for cAVS

Merge series from Cezary Rojewski <cezary.rojewski@intel.com>:

A continuation of the cleanup work on the Intel SST solutions found in
sound/soc/intel/. With two major chapters released last year, catpt [1]
and the removal of the haswell solution [2], the time has come for the
Skylake driver.

Throughout 2019, 2020 and 2021 the Skylake driver received many fixes
and even refactor attempts, as seen in the fundamental overhaul [3], IPC
flow adjustments [4] and LARGE_CONFIG overhaul [5] series.
Unfortunately, the story repeats itself: problems keep being found
within the core of the driver. Painting it with different colors does
not change the fact that it is still a house of cards. As the changes
needed to address those issues would make the Skylake solution
incompatible with its previous revisions, a decision has been made to
provide a new solution instead. In time it will deprecate and replace
the Skylake driver.

That solution has been named AVS, after the AudioDSP architecture name:
Audio-Voice-Speech. It is meant to support the exact same range of
platforms as its predecessor: SKL, KBL, AML and APL.

Note: this series depends on the HDA series [6], which exposes several
codec-organization functions, allowing for reduced code size on the
avs-driver side.

Note: this series does not add a fully functional driver, as its size
would get out of control. Here, the focus is on adding the IPC protocol
and the code-loading code.
parents 375a347d 092cf7b2
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/pm_runtime.h>
#include <linux/timecounter.h>
#include <sound/core.h>
@@ -448,6 +449,8 @@ static inline u16 snd_hdac_reg_readw(struct hdac_bus *bus, void __iomem *addr)
#define snd_hdac_reg_writel(bus, addr, val) writel(val, addr)
#define snd_hdac_reg_readl(bus, addr) readl(addr)
#define snd_hdac_reg_writeq(bus, addr, val) writeq(val, addr)
#define snd_hdac_reg_readq(bus, addr) readq(addr)
/*
* macros for easy use
......
@@ -2,6 +2,8 @@
#ifndef __SOUND_HDAUDIO_EXT_H
#define __SOUND_HDAUDIO_EXT_H
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
#include <sound/hdaudio.h>
int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
@@ -143,6 +145,54 @@ void snd_hdac_ext_bus_link_power(struct hdac_device *codec, bool enable);
writew(((readw(addr + reg) & ~(mask)) | (val)), \
addr + reg)
#define snd_hdac_adsp_writeb(chip, reg, value) \
snd_hdac_reg_writeb(chip, (chip)->dsp_ba + (reg), value)
#define snd_hdac_adsp_readb(chip, reg) \
snd_hdac_reg_readb(chip, (chip)->dsp_ba + (reg))
#define snd_hdac_adsp_writew(chip, reg, value) \
snd_hdac_reg_writew(chip, (chip)->dsp_ba + (reg), value)
#define snd_hdac_adsp_readw(chip, reg) \
snd_hdac_reg_readw(chip, (chip)->dsp_ba + (reg))
#define snd_hdac_adsp_writel(chip, reg, value) \
snd_hdac_reg_writel(chip, (chip)->dsp_ba + (reg), value)
#define snd_hdac_adsp_readl(chip, reg) \
snd_hdac_reg_readl(chip, (chip)->dsp_ba + (reg))
#define snd_hdac_adsp_writeq(chip, reg, value) \
snd_hdac_reg_writeq(chip, (chip)->dsp_ba + (reg), value)
#define snd_hdac_adsp_readq(chip, reg) \
snd_hdac_reg_readq(chip, (chip)->dsp_ba + (reg))
#define snd_hdac_adsp_updateb(chip, reg, mask, val) \
snd_hdac_adsp_writeb(chip, reg, \
(snd_hdac_adsp_readb(chip, reg) & ~(mask)) | (val))
#define snd_hdac_adsp_updatew(chip, reg, mask, val) \
snd_hdac_adsp_writew(chip, reg, \
(snd_hdac_adsp_readw(chip, reg) & ~(mask)) | (val))
#define snd_hdac_adsp_updatel(chip, reg, mask, val) \
snd_hdac_adsp_writel(chip, reg, \
(snd_hdac_adsp_readl(chip, reg) & ~(mask)) | (val))
#define snd_hdac_adsp_updateq(chip, reg, mask, val) \
snd_hdac_adsp_writeq(chip, reg, \
(snd_hdac_adsp_readq(chip, reg) & ~(mask)) | (val))
#define snd_hdac_adsp_readb_poll(chip, reg, val, cond, delay_us, timeout_us) \
readb_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
delay_us, timeout_us)
#define snd_hdac_adsp_readw_poll(chip, reg, val, cond, delay_us, timeout_us) \
readw_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
delay_us, timeout_us)
#define snd_hdac_adsp_readl_poll(chip, reg, val, cond, delay_us, timeout_us) \
readl_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
delay_us, timeout_us)
#define snd_hdac_adsp_readq_poll(chip, reg, val, cond, delay_us, timeout_us) \
readq_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
delay_us, timeout_us)
#define snd_hdac_stream_readb_poll(strm, reg, val, cond, delay_us, timeout_us) \
readb_poll_timeout((strm)->sd_addr + AZX_REG_ ## reg, val, cond, \
delay_us, timeout_us)
#define snd_hdac_stream_readl_poll(strm, reg, val, cond, delay_us, timeout_us) \
readl_poll_timeout((strm)->sd_addr + AZX_REG_ ## reg, val, cond, \
delay_us, timeout_us)
struct hdac_ext_device;
......
@@ -429,6 +429,7 @@ struct snd_soc_dapm_widget *snd_soc_dapm_new_control_unlocked(
const struct snd_soc_dapm_widget *widget);
int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
struct snd_soc_dai *dai);
void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
......
@@ -209,5 +209,17 @@ config SND_SOC_INTEL_KEEMBAY
If you have an Intel Keembay platform then enable this option
by saying Y or m.
config SND_SOC_INTEL_AVS
tristate "Intel AVS driver"
depends on PCI && ACPI
depends on COMMON_CLK
select SND_SOC_ACPI
select SND_HDA_EXT_CORE
select SND_HDA_DSP_LOADER
help
Enable support for Intel(R) cAVS 1.5 platforms with DSP
capabilities. This includes Skylake, Kabylake, Amberlake and
Apollolake.
# ASoC codec drivers
source "sound/soc/intel/boards/Kconfig"
@@ -7,6 +7,7 @@ obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM) += atom/
obj-$(CONFIG_SND_SOC_INTEL_CATPT) += catpt/
obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += skylake/
obj-$(CONFIG_SND_SOC_INTEL_KEEMBAY) += keembay/
obj-$(CONFIG_SND_SOC_INTEL_AVS) += avs/
# Machine support
obj-$(CONFIG_SND_SOC) += boards/
# SPDX-License-Identifier: GPL-2.0-only
snd-soc-avs-objs := dsp.o ipc.o messages.o utils.o core.o loader.o
snd-soc-avs-objs += cldma.o
obj-$(CONFIG_SND_SOC_INTEL_AVS) += snd-soc-avs.o
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
*
* Authors: Cezary Rojewski <cezary.rojewski@intel.com>
* Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
*/
#ifndef __SOUND_SOC_INTEL_AVS_H
#define __SOUND_SOC_INTEL_AVS_H
#include <linux/device.h>
#include <linux/firmware.h>
#include <sound/hda_codec.h>
#include <sound/hda_register.h>
#include "messages.h"
#include "registers.h"
struct avs_dev;
/*
* struct avs_dsp_ops - Platform-specific DSP operations
*
* @power: Power on or off DSP cores
* @reset: Enter or exit reset state on DSP cores
* @stall: Stall or run DSP cores
* @irq_handler: Top half of IPC servicing
* @irq_thread: Bottom half of IPC servicing
* @int_control: Enable or disable IPC interrupts
* @load_basefw: Load the base firmware image onto the DSP
* @load_lib: Load an additional firmware library
* @transfer_mods: Load or unload code for the specified modules
*/
struct avs_dsp_ops {
int (* const power)(struct avs_dev *, u32, bool);
int (* const reset)(struct avs_dev *, u32, bool);
int (* const stall)(struct avs_dev *, u32, bool);
irqreturn_t (* const irq_handler)(int, void *);
irqreturn_t (* const irq_thread)(int, void *);
void (* const int_control)(struct avs_dev *, bool);
int (* const load_basefw)(struct avs_dev *, struct firmware *);
int (* const load_lib)(struct avs_dev *, struct firmware *, u32);
int (* const transfer_mods)(struct avs_dev *, bool, struct avs_module_entry *, u32);
};
#define avs_dsp_op(adev, op, ...) \
((adev)->spec->dsp_ops->op(adev, ## __VA_ARGS__))
#define AVS_PLATATTR_CLDMA BIT_ULL(0)
#define AVS_PLATATTR_IMR BIT_ULL(1)
#define avs_platattr_test(adev, attr) \
((adev)->spec->attributes & AVS_PLATATTR_##attr)
/* Platform specific descriptor */
struct avs_spec {
const char *name;
const struct avs_dsp_ops *const dsp_ops;
struct avs_fw_version min_fw_version; /* anything below is rejected */
const u32 core_init_mask; /* used during DSP boot */
const u64 attributes; /* bitmask of AVS_PLATATTR_* */
const u32 sram_base_offset;
const u32 sram_window_size;
const u32 rom_status;
};
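/*
 * Illustrative only: a hypothetical cAVS 1.5 platform descriptor showing how
 * the pieces above fit together. The actual descriptors live in the
 * platform-specific code and are not shown here; every number below is a
 * placeholder, and real descriptors also set @min_fw_version.
 *
 *	static const struct avs_dsp_ops example_dsp_ops = {
 *		.power = avs_dsp_core_power,
 *		.reset = avs_dsp_core_reset,
 *		.stall = avs_dsp_core_stall,
 *		.irq_handler = avs_dsp_irq_handler,
 *		.irq_thread = avs_dsp_irq_thread,
 *		.int_control = avs_dsp_interrupt_control,
 *		.load_basefw = avs_cldma_load_basefw,
 *		.load_lib = avs_cldma_load_library,
 *		.transfer_mods = avs_cldma_transfer_modules,
 *	};
 *
 *	static const struct avs_spec example_spec = {
 *		.name = "example",
 *		.dsp_ops = &example_dsp_ops,
 *		.core_init_mask = 1,
 *		.attributes = AVS_PLATATTR_CLDMA,
 *		.sram_base_offset = 0x8000,
 *		.sram_window_size = 0x800,
 *		.rom_status = 0x8000,
 *	};
 *
 * With such a descriptor installed, operations are invoked through
 * avs_dsp_op(), e.g. avs_dsp_op(adev, power, core_mask, true), and platform
 * attributes are tested with avs_platattr_test(adev, CLDMA).
 */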
struct avs_fw_entry {
char *name;
const struct firmware *fw;
struct list_head node;
};
/*
* struct avs_dev - Intel HD-Audio driver data
*
* @dev: PCI device
* @dsp_ba: DSP bar address
* @spec: platform-specific descriptor
* @fw_cfg: Firmware configuration, obtained through FW_CONFIG message
* @hw_cfg: Hardware configuration, obtained through HW_CONFIG message
* @mods_info: Available module-types, obtained through MODULES_INFO message
* @mod_idas: Module instance ID pool, one per module-type
* @modres_mutex: For synchronizing any @mods_info updates
* @ppl_ida: Pipeline instance ID pool
* @fw_list: List of libraries loaded, including base firmware
* @core_refs: Reference counts, one per DSP core
* @lib_names: Names of the loaded firmware libraries
* @fw_ready: Completed once base firmware reports it is ready
*/
struct avs_dev {
struct hda_bus base;
struct device *dev;
void __iomem *dsp_ba;
const struct avs_spec *spec;
struct avs_ipc *ipc;
struct avs_fw_cfg fw_cfg;
struct avs_hw_cfg hw_cfg;
struct avs_mods_info *mods_info;
struct ida **mod_idas;
struct mutex modres_mutex;
struct ida ppl_ida;
struct list_head fw_list;
int *core_refs; /* reference count per core */
char **lib_names;
struct completion fw_ready;
};
/* from hda_bus to avs_dev */
#define hda_to_avs(hda) container_of(hda, struct avs_dev, base)
/* from hdac_bus to avs_dev */
#define hdac_to_avs(hdac) hda_to_avs(to_hda_bus(hdac))
/* from device to avs_dev */
#define to_avs_dev(dev) \
({ \
struct hdac_bus *__bus = dev_get_drvdata(dev); \
hdac_to_avs(__bus); \
})
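/*
 * Usage sketch (hypothetical callback, not part of this header): the
 * conversion helpers let code holding only a struct device, hdac_bus or
 * hda_bus recover the driver context.
 *
 *	static int example_suspend(struct device *dev)
 *	{
 *		struct avs_dev *adev = to_avs_dev(dev);
 *
 *		avs_dsp_op(adev, int_control, false);
 *		return avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
 *	}
 */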
int avs_dsp_core_power(struct avs_dev *adev, u32 core_mask, bool power);
int avs_dsp_core_reset(struct avs_dev *adev, u32 core_mask, bool reset);
int avs_dsp_core_stall(struct avs_dev *adev, u32 core_mask, bool stall);
int avs_dsp_core_enable(struct avs_dev *adev, u32 core_mask);
int avs_dsp_core_disable(struct avs_dev *adev, u32 core_mask);
/* Inter Process Communication */
struct avs_ipc_msg {
union {
u64 header;
union avs_global_msg glb;
union avs_reply_msg rsp;
};
void *data;
size_t size;
};
/*
* struct avs_ipc - DSP IPC context
*
* @dev: PCI device
* @rx: Reply message cache
* @default_timeout_ms: default message timeout in milliseconds
* @ready: whether firmware is ready and communication is open
* @rx_completed: whether RX for previously sent TX has been received
* @rx_lock: for serializing manipulation of rx_* fields
* @msg_mutex: for synchronizing request handling
* @done_completion: DONE-part of IPC i.e. ROM and ACKs from FW
* @busy_completion: BUSY-part of IPC i.e. receiving responses from FW
*/
struct avs_ipc {
struct device *dev;
struct avs_ipc_msg rx;
u32 default_timeout_ms;
bool ready;
bool rx_completed;
spinlock_t rx_lock;
struct mutex msg_mutex;
struct completion done_completion;
struct completion busy_completion;
};
#define AVS_EIPC EREMOTEIO
/*
* IPC handlers may return a positive value (firmware error code) which denotes
* successful HOST <-> DSP communication yet a failure to process the specific
* request.
*
* The macro below converts the returned value to a Linux kernel error code.
* All IPC callers MUST use it as soon as the firmware error code is consumed.
*/
#define AVS_IPC_RET(ret) \
(((ret) <= 0) ? (ret) : -AVS_EIPC)
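/*
 * Usage sketch: 0 and negative kernel error codes pass through unchanged,
 * while any positive firmware status collapses to -AVS_EIPC (-EREMOTEIO).
 *
 *	ret = avs_ipc_delete_pipeline(adev, instance_id);
 *	if (ret)
 *		ret = AVS_IPC_RET(ret);
 */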
static inline void avs_ipc_err(struct avs_dev *adev, struct avs_ipc_msg *tx,
const char *name, int error)
{
/*
* If the IPC channel is blocked, e.g. due to ongoing recovery,
* an -EPERM error code is expected and thus it's not an actual error.
*/
if (error == -EPERM)
dev_dbg(adev->dev, "%s 0x%08x 0x%08x failed: %d\n", name,
tx->glb.primary, tx->glb.ext.val, error);
else
dev_err(adev->dev, "%s 0x%08x 0x%08x failed: %d\n", name,
tx->glb.primary, tx->glb.ext.val, error);
}
irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id);
irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id);
void avs_dsp_process_response(struct avs_dev *adev, u64 header);
int avs_dsp_send_msg_timeout(struct avs_dev *adev,
struct avs_ipc_msg *request,
struct avs_ipc_msg *reply, int timeout);
int avs_dsp_send_msg(struct avs_dev *adev,
struct avs_ipc_msg *request, struct avs_ipc_msg *reply);
int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev,
struct avs_ipc_msg *request, int timeout);
int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request);
void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable);
int avs_ipc_init(struct avs_ipc *ipc, struct device *dev);
void avs_ipc_block(struct avs_ipc *ipc);
/* Firmware resources management */
int avs_get_module_entry(struct avs_dev *adev, const guid_t *uuid, struct avs_module_entry *entry);
int avs_get_module_id_entry(struct avs_dev *adev, u32 module_id, struct avs_module_entry *entry);
int avs_get_module_id(struct avs_dev *adev, const guid_t *uuid);
bool avs_is_module_ida_empty(struct avs_dev *adev, u32 module_id);
int avs_module_info_init(struct avs_dev *adev, bool purge);
void avs_module_info_free(struct avs_dev *adev);
int avs_module_id_alloc(struct avs_dev *adev, u16 module_id);
void avs_module_id_free(struct avs_dev *adev, u16 module_id, u8 instance_id);
int avs_request_firmware(struct avs_dev *adev, const struct firmware **fw_p, const char *name);
void avs_release_last_firmware(struct avs_dev *adev);
void avs_release_firmwares(struct avs_dev *adev);
int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id,
u8 core_id, u8 domain, void *param, u32 param_size,
u16 *instance_id);
void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u16 instance_id,
u8 ppl_instance_id, u8 core_id);
int avs_dsp_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority,
bool lp, u16 attributes, u8 *instance_id);
int avs_dsp_delete_pipeline(struct avs_dev *adev, u8 instance_id);
/* Firmware loading */
void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable);
void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable);
void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable);
int avs_dsp_boot_firmware(struct avs_dev *adev, bool purge);
int avs_dsp_first_boot_firmware(struct avs_dev *adev);
int avs_cldma_load_basefw(struct avs_dev *adev, struct firmware *fw);
int avs_cldma_load_library(struct avs_dev *adev, struct firmware *lib, u32 id);
int avs_cldma_transfer_modules(struct avs_dev *adev, bool load,
struct avs_module_entry *mods, u32 num_mods);
int avs_hda_load_basefw(struct avs_dev *adev, struct firmware *fw);
int avs_hda_load_library(struct avs_dev *adev, struct firmware *lib, u32 id);
int avs_hda_transfer_modules(struct avs_dev *adev, bool load,
struct avs_module_entry *mods, u32 num_mods);
#endif /* __SOUND_SOC_INTEL_AVS_H */
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Author: Cezary Rojewski <cezary.rojewski@intel.com>
//
#include <linux/pci.h>
#include <sound/hda_register.h>
#include <sound/hdaudio_ext.h>
#include "cldma.h"
#include "registers.h"
/* Stream Registers */
#define AZX_CL_SD_BASE 0x80
#define AZX_SD_CTL_STRM_MASK GENMASK(23, 20)
#define AZX_SD_CTL_STRM(s) (((s)->stream_tag << 20) & AZX_SD_CTL_STRM_MASK)
#define AZX_SD_BDLPL_BDLPLBA_MASK GENMASK(31, 7)
#define AZX_SD_BDLPL_BDLPLBA(lb) ((lb) & AZX_SD_BDLPL_BDLPLBA_MASK)
/* Software Position Based FIFO Capability Registers */
#define AZX_CL_SPBFCS 0x20
#define AZX_REG_CL_SPBFCTL (AZX_CL_SPBFCS + 0x4)
#define AZX_REG_CL_SD_SPIB (AZX_CL_SPBFCS + 0x8)
#define AVS_CL_OP_INTERVAL_US 3
#define AVS_CL_OP_TIMEOUT_US 300
#define AVS_CL_IOC_TIMEOUT_MS 300
#define AVS_CL_STREAM_INDEX 0
struct hda_cldma {
struct device *dev;
struct hdac_bus *bus;
void __iomem *dsp_ba;
unsigned int buffer_size;
unsigned int num_periods;
unsigned int stream_tag;
void __iomem *sd_addr;
struct snd_dma_buffer dmab_data;
struct snd_dma_buffer dmab_bdl;
struct delayed_work memcpy_work;
struct completion completion;
/* runtime */
void *position;
unsigned int remaining;
unsigned int sd_status;
};
static void cldma_memcpy_work(struct work_struct *work);
struct hda_cldma code_loader = {
.stream_tag = AVS_CL_STREAM_INDEX + 1,
.memcpy_work = __DELAYED_WORK_INITIALIZER(code_loader.memcpy_work, cldma_memcpy_work, 0),
.completion = COMPLETION_INITIALIZER(code_loader.completion),
};
void hda_cldma_fill(struct hda_cldma *cl)
{
unsigned int size, offset;
if (cl->remaining > cl->buffer_size)
size = cl->buffer_size;
else
size = cl->remaining;
offset = snd_hdac_stream_readl(cl, CL_SD_SPIB);
if (offset + size > cl->buffer_size) {
unsigned int ss;
ss = cl->buffer_size - offset;
memcpy(cl->dmab_data.area + offset, cl->position, ss);
offset = 0;
size -= ss;
cl->position += ss;
cl->remaining -= ss;
}
memcpy(cl->dmab_data.area + offset, cl->position, size);
cl->position += size;
cl->remaining -= size;
snd_hdac_stream_writel(cl, CL_SD_SPIB, offset + size);
}
static void cldma_memcpy_work(struct work_struct *work)
{
struct hda_cldma *cl = container_of(work, struct hda_cldma, memcpy_work.work);
int ret;
ret = hda_cldma_start(cl);
if (ret < 0) {
dev_err(cl->dev, "cldma set RUN failed: %d\n", ret);
return;
}
while (true) {
ret = wait_for_completion_timeout(&cl->completion,
msecs_to_jiffies(AVS_CL_IOC_TIMEOUT_MS));
if (!ret) {
dev_err(cl->dev, "cldma IOC timeout\n");
break;
}
if (!(cl->sd_status & SD_INT_COMPLETE)) {
dev_err(cl->dev, "cldma transfer error, SD status: 0x%08x\n",
cl->sd_status);
break;
}
if (!cl->remaining)
break;
reinit_completion(&cl->completion);
hda_cldma_fill(cl);
/* enable CLDMA interrupt */
snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA,
AVS_ADSP_ADSPIC_CLDMA);
}
}
void hda_cldma_transfer(struct hda_cldma *cl, unsigned long start_delay)
{
if (!cl->remaining)
return;
reinit_completion(&cl->completion);
/* fill buffer with the first chunk before scheduling run */
hda_cldma_fill(cl);
schedule_delayed_work(&cl->memcpy_work, start_delay);
}
int hda_cldma_start(struct hda_cldma *cl)
{
unsigned int reg;
/* enable interrupts */
snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA,
AVS_ADSP_ADSPIC_CLDMA);
snd_hdac_stream_updateb(cl, SD_CTL, SD_INT_MASK | SD_CTL_DMA_START,
SD_INT_MASK | SD_CTL_DMA_START);
/* await DMA engine start */
return snd_hdac_stream_readb_poll(cl, SD_CTL, reg, reg & SD_CTL_DMA_START,
AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
}
int hda_cldma_stop(struct hda_cldma *cl)
{
unsigned int reg;
int ret;
/* disable interrupts */
snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA, 0);
snd_hdac_stream_updateb(cl, SD_CTL, SD_INT_MASK | SD_CTL_DMA_START, 0);
/* await DMA engine stop */
ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, !(reg & SD_CTL_DMA_START),
AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
cancel_delayed_work_sync(&cl->memcpy_work);
return ret;
}
int hda_cldma_reset(struct hda_cldma *cl)
{
unsigned int reg;
int ret;
ret = hda_cldma_stop(cl);
if (ret < 0) {
dev_err(cl->dev, "cldma stop failed: %d\n", ret);
return ret;
}
snd_hdac_stream_updateb(cl, SD_CTL, 1, 1);
ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, (reg & 1), AVS_CL_OP_INTERVAL_US,
AVS_CL_OP_TIMEOUT_US);
if (ret < 0) {
dev_err(cl->dev, "cldma set SRST failed: %d\n", ret);
return ret;
}
snd_hdac_stream_updateb(cl, SD_CTL, 1, 0);
ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, !(reg & 1), AVS_CL_OP_INTERVAL_US,
AVS_CL_OP_TIMEOUT_US);
if (ret < 0) {
dev_err(cl->dev, "cldma unset SRST failed: %d\n", ret);
return ret;
}
return 0;
}
void hda_cldma_set_data(struct hda_cldma *cl, void *data, unsigned int size)
{
/* setup runtime */
cl->position = data;
cl->remaining = size;
}
static void cldma_setup_bdle(struct hda_cldma *cl, u32 bdle_size)
{
struct snd_dma_buffer *dmab = &cl->dmab_data;
__le32 *bdl = (__le32 *)cl->dmab_bdl.area;
int remaining = cl->buffer_size;
int offset = 0;
cl->num_periods = 0;
while (remaining > 0) {
phys_addr_t addr;
int chunk;
addr = snd_sgbuf_get_addr(dmab, offset);
bdl[0] = cpu_to_le32(lower_32_bits(addr));
bdl[1] = cpu_to_le32(upper_32_bits(addr));
chunk = snd_sgbuf_get_chunk_size(dmab, offset, bdle_size);
bdl[2] = cpu_to_le32(chunk);
remaining -= chunk;
/* set IOC only for the last entry */
bdl[3] = (remaining > 0) ? 0 : cpu_to_le32(0x01);
bdl += 4;
offset += chunk;
cl->num_periods++;
}
}
void hda_cldma_setup(struct hda_cldma *cl)
{
dma_addr_t bdl_addr = cl->dmab_bdl.addr;
cldma_setup_bdle(cl, cl->buffer_size / 2);
snd_hdac_stream_writel(cl, SD_BDLPL, AZX_SD_BDLPL_BDLPLBA(lower_32_bits(bdl_addr)));
snd_hdac_stream_writel(cl, SD_BDLPU, upper_32_bits(bdl_addr));
snd_hdac_stream_writel(cl, SD_CBL, cl->buffer_size);
snd_hdac_stream_writeb(cl, SD_LVI, cl->num_periods - 1);
snd_hdac_stream_updatel(cl, SD_CTL, AZX_SD_CTL_STRM_MASK, AZX_SD_CTL_STRM(cl));
/* enable spib */
snd_hdac_stream_writel(cl, CL_SPBFCTL, 1);
}
static irqreturn_t cldma_irq_handler(int irq, void *dev_id)
{
struct hda_cldma *cl = dev_id;
u32 adspis;
adspis = snd_hdac_adsp_readl(cl, AVS_ADSP_REG_ADSPIS);
if (adspis == UINT_MAX)
return IRQ_NONE;
if (!(adspis & AVS_ADSP_ADSPIS_CLDMA))
return IRQ_NONE;
cl->sd_status = snd_hdac_stream_readb(cl, SD_STS);
dev_warn(cl->dev, "%s sd_status: 0x%08x\n", __func__, cl->sd_status);
/* disable CLDMA interrupt */
snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA, 0);
complete(&cl->completion);
return IRQ_HANDLED;
}
int hda_cldma_init(struct hda_cldma *cl, struct hdac_bus *bus, void __iomem *dsp_ba,
unsigned int buffer_size)
{
struct pci_dev *pci = to_pci_dev(bus->dev);
int ret;
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, bus->dev, buffer_size, &cl->dmab_data);
if (ret < 0)
return ret;
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, bus->dev, BDL_SIZE, &cl->dmab_bdl);
if (ret < 0)
goto alloc_err;
cl->dev = bus->dev;
cl->bus = bus;
cl->dsp_ba = dsp_ba;
cl->buffer_size = buffer_size;
cl->sd_addr = dsp_ba + AZX_CL_SD_BASE;
ret = pci_request_irq(pci, 0, cldma_irq_handler, NULL, cl, "CLDMA");
if (ret < 0) {
dev_err(cl->dev, "Failed to request CLDMA IRQ handler: %d\n", ret);
goto req_err;
}
return 0;
req_err:
snd_dma_free_pages(&cl->dmab_bdl);
alloc_err:
snd_dma_free_pages(&cl->dmab_data);
return ret;
}
void hda_cldma_free(struct hda_cldma *cl)
{
struct pci_dev *pci = to_pci_dev(cl->dev);
pci_free_irq(pci, 0, cl);
snd_dma_free_pages(&cl->dmab_data);
snd_dma_free_pages(&cl->dmab_bdl);
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
*
* Author: Cezary Rojewski <cezary.rojewski@intel.com>
*/
#ifndef __SOUND_SOC_INTEL_AVS_CLDMA_H
#define __SOUND_SOC_INTEL_AVS_CLDMA_H
#define AVS_CL_DEFAULT_BUFFER_SIZE (32 * PAGE_SIZE)
struct hda_cldma;
extern struct hda_cldma code_loader;
void hda_cldma_fill(struct hda_cldma *cl);
void hda_cldma_transfer(struct hda_cldma *cl, unsigned long start_delay);
int hda_cldma_start(struct hda_cldma *cl);
int hda_cldma_stop(struct hda_cldma *cl);
int hda_cldma_reset(struct hda_cldma *cl);
void hda_cldma_set_data(struct hda_cldma *cl, void *data, unsigned int size);
void hda_cldma_setup(struct hda_cldma *cl);
int hda_cldma_init(struct hda_cldma *cl, struct hdac_bus *bus, void __iomem *dsp_ba,
unsigned int buffer_size);
void hda_cldma_free(struct hda_cldma *cl);
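/*
 * Rough usage sketch; the actual code-loading path may differ in details
 * such as start delays, DSP core power sequencing and ROM status polling:
 *
 *	hda_cldma_init(&code_loader, bus, dsp_ba, AVS_CL_DEFAULT_BUFFER_SIZE);
 *	hda_cldma_reset(&code_loader);
 *	hda_cldma_setup(&code_loader);
 *	hda_cldma_set_data(&code_loader, image, image_size);
 *	hda_cldma_transfer(&code_loader, 0);
 *	... wait for the ROM/firmware to report the image as consumed ...
 *	hda_cldma_stop(&code_loader);
 *	hda_cldma_free(&code_loader);
 */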
#endif
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//
// Special thanks to:
// Krzysztof Hejmowski <krzysztof.hejmowski@intel.com>
// Michal Sienkiewicz <michal.sienkiewicz@intel.com>
// Filip Proborszcz
//
// for sharing Intel AudioDSP expertise and helping shape the very
// foundation of this driver
//
#include <linux/pci.h>
#include <sound/hdaudio.h>
#include "avs.h"
static void
avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
{
struct pci_dev *pci = to_pci_dev(bus->dev);
u32 data;
pci_read_config_dword(pci, reg, &data);
data &= ~mask;
data |= (value & mask);
pci_write_config_dword(pci, reg, data);
}
void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
{
u32 value;
value = enable ? 0 : AZX_PGCTL_LSRMD_MASK;
avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL,
AZX_PGCTL_LSRMD_MASK, value);
}
static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
{
u32 value;
value = enable ? AZX_CGCTL_MISCBDCGE_MASK : 0;
avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, AZX_CGCTL_MISCBDCGE_MASK, value);
}
void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
{
avs_hdac_clock_gating_enable(&adev->base.core, enable);
}
void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
{
u32 value;
value = enable ? AZX_VS_EM2_L1SEN : 0;
snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, value);
}
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//
#include <linux/module.h>
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "registers.h"
#define AVS_ADSPCS_INTERVAL_US 500
#define AVS_ADSPCS_TIMEOUT_US 50000
int avs_dsp_core_power(struct avs_dev *adev, u32 core_mask, bool power)
{
u32 value, mask, reg;
int ret;
mask = AVS_ADSPCS_SPA_MASK(core_mask);
value = power ? mask : 0;
snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);
mask = AVS_ADSPCS_CPA_MASK(core_mask);
value = power ? mask : 0;
ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
reg, (reg & mask) == value,
AVS_ADSPCS_INTERVAL_US,
AVS_ADSPCS_TIMEOUT_US);
if (ret)
dev_err(adev->dev, "core_mask %d power %s failed: %d\n",
core_mask, power ? "on" : "off", ret);
return ret;
}
int avs_dsp_core_reset(struct avs_dev *adev, u32 core_mask, bool reset)
{
u32 value, mask, reg;
int ret;
mask = AVS_ADSPCS_CRST_MASK(core_mask);
value = reset ? mask : 0;
snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);
ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
reg, (reg & mask) == value,
AVS_ADSPCS_INTERVAL_US,
AVS_ADSPCS_TIMEOUT_US);
if (ret)
dev_err(adev->dev, "core_mask %d %s reset failed: %d\n",
core_mask, reset ? "enter" : "exit", ret);
return ret;
}
int avs_dsp_core_stall(struct avs_dev *adev, u32 core_mask, bool stall)
{
u32 value, mask, reg;
int ret;
mask = AVS_ADSPCS_CSTALL_MASK(core_mask);
value = stall ? mask : 0;
snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);
ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
reg, (reg & mask) == value,
AVS_ADSPCS_INTERVAL_US,
AVS_ADSPCS_TIMEOUT_US);
if (ret)
dev_err(adev->dev, "core_mask %d %sstall failed: %d\n",
core_mask, stall ? "" : "un", ret);
return ret;
}
int avs_dsp_core_enable(struct avs_dev *adev, u32 core_mask)
{
int ret;
ret = avs_dsp_op(adev, power, core_mask, true);
if (ret)
return ret;
ret = avs_dsp_op(adev, reset, core_mask, false);
if (ret)
return ret;
return avs_dsp_op(adev, stall, core_mask, false);
}
int avs_dsp_core_disable(struct avs_dev *adev, u32 core_mask)
{
/* No error checks to allow for complete DSP shutdown. */
avs_dsp_op(adev, stall, core_mask, true);
avs_dsp_op(adev, reset, core_mask, true);
return avs_dsp_op(adev, power, core_mask, false);
}
static int avs_dsp_enable(struct avs_dev *adev, u32 core_mask)
{
u32 mask;
int ret;
ret = avs_dsp_core_enable(adev, core_mask);
if (ret < 0)
return ret;
mask = core_mask & ~AVS_MAIN_CORE_MASK;
if (!mask)
/*
* without main core, fw is dead anyway
* so setting D0 for it is futile.
*/
return 0;
ret = avs_ipc_set_dx(adev, mask, true);
return AVS_IPC_RET(ret);
}
static int avs_dsp_disable(struct avs_dev *adev, u32 core_mask)
{
int ret;
ret = avs_ipc_set_dx(adev, core_mask, false);
if (ret)
return AVS_IPC_RET(ret);
return avs_dsp_core_disable(adev, core_mask);
}
static int avs_dsp_get_core(struct avs_dev *adev, u32 core_id)
{
u32 mask;
int ret;
mask = BIT_MASK(core_id);
if (mask == AVS_MAIN_CORE_MASK)
/* nothing to do for main core */
return 0;
if (core_id >= adev->hw_cfg.dsp_cores) {
ret = -EINVAL;
goto err;
}
adev->core_refs[core_id]++;
if (adev->core_refs[core_id] == 1) {
ret = avs_dsp_enable(adev, mask);
if (ret)
goto err_enable_dsp;
}
return 0;
err_enable_dsp:
adev->core_refs[core_id]--;
err:
dev_err(adev->dev, "get core %d failed: %d\n", core_id, ret);
return ret;
}
static int avs_dsp_put_core(struct avs_dev *adev, u32 core_id)
{
u32 mask;
int ret;
mask = BIT_MASK(core_id);
if (mask == AVS_MAIN_CORE_MASK)
/* nothing to do for main core */
return 0;
if (core_id >= adev->hw_cfg.dsp_cores) {
ret = -EINVAL;
goto err;
}
adev->core_refs[core_id]--;
if (!adev->core_refs[core_id]) {
ret = avs_dsp_disable(adev, mask);
if (ret)
goto err;
}
return 0;
err:
dev_err(adev->dev, "put core %d failed: %d\n", core_id, ret);
return ret;
}
int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id,
u8 core_id, u8 domain, void *param, u32 param_size,
u16 *instance_id)
{
struct avs_module_entry mentry;
bool was_loaded = false;
int ret, id;
id = avs_module_id_alloc(adev, module_id);
if (id < 0)
return id;
ret = avs_get_module_id_entry(adev, module_id, &mentry);
if (ret)
goto err_mod_entry;
ret = avs_dsp_get_core(adev, core_id);
if (ret)
goto err_mod_entry;
/* Load code into memory if this is the first instance. */
if (!id && !avs_module_entry_is_loaded(&mentry)) {
ret = avs_dsp_op(adev, transfer_mods, true, &mentry, 1);
if (ret) {
dev_err(adev->dev, "load modules failed: %d\n", ret);
goto err_mod_entry;
}
was_loaded = true;
}
ret = avs_ipc_init_instance(adev, module_id, id, ppl_instance_id,
core_id, domain, param, param_size);
if (ret) {
ret = AVS_IPC_RET(ret);
goto err_ipc;
}
*instance_id = id;
return 0;
err_ipc:
if (was_loaded)
avs_dsp_op(adev, transfer_mods, false, &mentry, 1);
avs_dsp_put_core(adev, core_id);
err_mod_entry:
avs_module_id_free(adev, module_id, id);
return ret;
}
void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u16 instance_id,
u8 ppl_instance_id, u8 core_id)
{
struct avs_module_entry mentry;
int ret;
/* Modules not owned by any pipeline need to be freed explicitly. */
if (ppl_instance_id == INVALID_PIPELINE_ID)
avs_ipc_delete_instance(adev, module_id, instance_id);
avs_module_id_free(adev, module_id, instance_id);
ret = avs_get_module_id_entry(adev, module_id, &mentry);
/* Unload occupied memory if this was the last instance. */
if (!ret && mentry.type.load_type == AVS_MODULE_LOAD_TYPE_LOADABLE) {
if (avs_is_module_ida_empty(adev, module_id)) {
ret = avs_dsp_op(adev, transfer_mods, false, &mentry, 1);
if (ret)
dev_err(adev->dev, "unload modules failed: %d\n", ret);
}
}
avs_dsp_put_core(adev, core_id);
}
int avs_dsp_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority,
bool lp, u16 attributes, u8 *instance_id)
{
struct avs_fw_cfg *fw_cfg = &adev->fw_cfg;
int ret, id;
id = ida_alloc_max(&adev->ppl_ida, fw_cfg->max_ppl_count - 1, GFP_KERNEL);
if (id < 0)
return id;
ret = avs_ipc_create_pipeline(adev, req_size, priority, id, lp, attributes);
if (ret) {
ida_free(&adev->ppl_ida, id);
return AVS_IPC_RET(ret);
}
*instance_id = id;
return 0;
}
int avs_dsp_delete_pipeline(struct avs_dev *adev, u8 instance_id)
{
int ret;
ret = avs_ipc_delete_pipeline(adev, instance_id);
if (ret)
ret = AVS_IPC_RET(ret);
ida_free(&adev->ppl_ida, instance_id);
return ret;
}
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
*
* Authors: Cezary Rojewski <cezary.rojewski@intel.com>
* Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
*/
#ifndef __SOUND_SOC_INTEL_AVS_REGS_H
#define __SOUND_SOC_INTEL_AVS_REGS_H
#define AZX_PCIREG_PGCTL 0x44
#define AZX_PCIREG_CGCTL 0x48
#define AZX_PGCTL_LSRMD_MASK BIT(4)
#define AZX_CGCTL_MISCBDCGE_MASK BIT(6)
#define AZX_VS_EM2_L1SEN BIT(13)
/* Intel HD Audio General DSP Registers */
#define AVS_ADSP_GEN_BASE 0x0
#define AVS_ADSP_REG_ADSPCS (AVS_ADSP_GEN_BASE + 0x04)
#define AVS_ADSP_REG_ADSPIC (AVS_ADSP_GEN_BASE + 0x08)
#define AVS_ADSP_REG_ADSPIS (AVS_ADSP_GEN_BASE + 0x0C)
#define AVS_ADSP_ADSPIC_IPC BIT(0)
#define AVS_ADSP_ADSPIC_CLDMA BIT(1)
#define AVS_ADSP_ADSPIS_IPC BIT(0)
#define AVS_ADSP_ADSPIS_CLDMA BIT(1)
#define AVS_ADSPCS_CRST_MASK(cm) (cm)
#define AVS_ADSPCS_CSTALL_MASK(cm) ((cm) << 8)
#define AVS_ADSPCS_SPA_MASK(cm) ((cm) << 16)
#define AVS_ADSPCS_CPA_MASK(cm) ((cm) << 24)
#define AVS_MAIN_CORE_MASK BIT(0)
#define AVS_ADSP_HIPCCTL_BUSY BIT(0)
#define AVS_ADSP_HIPCCTL_DONE BIT(1)
/* SKL Intel HD Audio Inter-Processor Communication Registers */
#define SKL_ADSP_IPC_BASE 0x40
#define SKL_ADSP_REG_HIPCT (SKL_ADSP_IPC_BASE + 0x00)
#define SKL_ADSP_REG_HIPCTE (SKL_ADSP_IPC_BASE + 0x04)
#define SKL_ADSP_REG_HIPCI (SKL_ADSP_IPC_BASE + 0x08)
#define SKL_ADSP_REG_HIPCIE (SKL_ADSP_IPC_BASE + 0x0C)
#define SKL_ADSP_REG_HIPCCTL (SKL_ADSP_IPC_BASE + 0x10)
#define SKL_ADSP_HIPCI_BUSY BIT(31)
#define SKL_ADSP_HIPCIE_DONE BIT(30)
#define SKL_ADSP_HIPCT_BUSY BIT(31)
/* Constants used when accessing SRAM, space shared with firmware */
#define AVS_FW_REG_BASE(adev) ((adev)->spec->sram_base_offset)
#define AVS_FW_REG_STATUS(adev) (AVS_FW_REG_BASE(adev) + 0x0)
#define AVS_FW_REG_ERROR_CODE(adev) (AVS_FW_REG_BASE(adev) + 0x4)
#define AVS_FW_REGS_SIZE PAGE_SIZE
#define AVS_FW_REGS_WINDOW 0
/* DSP -> HOST communication window */
#define AVS_UPLINK_WINDOW AVS_FW_REGS_WINDOW
/* HOST -> DSP communication window */
#define AVS_DOWNLINK_WINDOW 1
/* registry I/O helpers */
#define avs_sram_offset(adev, window_idx) \
((adev)->spec->sram_base_offset + \
(adev)->spec->sram_window_size * (window_idx))
#define avs_sram_addr(adev, window_idx) \
((adev)->dsp_ba + avs_sram_offset(adev, window_idx))
#define avs_uplink_addr(adev) \
(avs_sram_addr(adev, AVS_UPLINK_WINDOW) + AVS_FW_REGS_SIZE)
#define avs_downlink_addr(adev) \
avs_sram_addr(adev, AVS_DOWNLINK_WINDOW)
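/*
 * Worked example with made-up numbers: for sram_base_offset = 0x8000 and
 * sram_window_size = 0x800, window 0 (FW registers followed by the uplink
 * mailbox) starts at dsp_ba + 0x8000 and window 1 (downlink mailbox) at
 * dsp_ba + 0x8800, so avs_uplink_addr(adev) is dsp_ba + 0x8000 +
 * AVS_FW_REGS_SIZE and avs_downlink_addr(adev) is dsp_ba + 0x8800.
 */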
#endif /* __SOUND_SOC_INTEL_AVS_REGS_H */
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//
#include <linux/firmware.h>
#include <linux/slab.h>
#include "avs.h"
#include "messages.h"
/* Caller responsible for holding adev->modres_mutex. */
static int avs_module_entry_index(struct avs_dev *adev, const guid_t *uuid)
{
int i;
for (i = 0; i < adev->mods_info->count; i++) {
struct avs_module_entry *module;
module = &adev->mods_info->entries[i];
if (guid_equal(&module->uuid, uuid))
return i;
}
return -ENOENT;
}
/* Caller responsible for holding adev->modres_mutex. */
static int avs_module_id_entry_index(struct avs_dev *adev, u32 module_id)
{
int i;
for (i = 0; i < adev->mods_info->count; i++) {
struct avs_module_entry *module;
module = &adev->mods_info->entries[i];
if (module->module_id == module_id)
return i;
}
return -ENOENT;
}
int avs_get_module_entry(struct avs_dev *adev, const guid_t *uuid, struct avs_module_entry *entry)
{
int idx;
mutex_lock(&adev->modres_mutex);
idx = avs_module_entry_index(adev, uuid);
if (idx >= 0)
memcpy(entry, &adev->mods_info->entries[idx], sizeof(*entry));
mutex_unlock(&adev->modres_mutex);
return (idx < 0) ? idx : 0;
}
int avs_get_module_id_entry(struct avs_dev *adev, u32 module_id, struct avs_module_entry *entry)
{
int idx;
mutex_lock(&adev->modres_mutex);
idx = avs_module_id_entry_index(adev, module_id);
if (idx >= 0)
memcpy(entry, &adev->mods_info->entries[idx], sizeof(*entry));
mutex_unlock(&adev->modres_mutex);
return (idx < 0) ? idx : 0;
}
int avs_get_module_id(struct avs_dev *adev, const guid_t *uuid)
{
struct avs_module_entry module;
int ret;
ret = avs_get_module_entry(adev, uuid, &module);
return !ret ? module.module_id : -ENOENT;
}
bool avs_is_module_ida_empty(struct avs_dev *adev, u32 module_id)
{
bool ret = false;
int idx;
mutex_lock(&adev->modres_mutex);
idx = avs_module_id_entry_index(adev, module_id);
if (idx >= 0)
ret = ida_is_empty(adev->mod_idas[idx]);
mutex_unlock(&adev->modres_mutex);
return ret;
}
/* Caller responsible for holding adev->modres_mutex. */
static void avs_module_ida_destroy(struct avs_dev *adev)
{
int i = adev->mods_info ? adev->mods_info->count : 0;
while (i--) {
ida_destroy(adev->mod_idas[i]);
kfree(adev->mod_idas[i]);
}
kfree(adev->mod_idas);
}
/* Caller responsible for holding adev->modres_mutex. */
static int
avs_module_ida_alloc(struct avs_dev *adev, struct avs_mods_info *newinfo, bool purge)
{
struct avs_mods_info *oldinfo = adev->mods_info;
struct ida **ida_ptrs;
u32 tocopy_count = 0;
int i;
if (!purge && oldinfo) {
if (oldinfo->count >= newinfo->count)
dev_warn(adev->dev, "refreshing %d modules info with %d\n",
oldinfo->count, newinfo->count);
tocopy_count = oldinfo->count;
}
ida_ptrs = kcalloc(newinfo->count, sizeof(*ida_ptrs), GFP_KERNEL);
if (!ida_ptrs)
return -ENOMEM;
if (tocopy_count)
memcpy(ida_ptrs, adev->mod_idas, tocopy_count * sizeof(*ida_ptrs));
for (i = tocopy_count; i < newinfo->count; i++) {
ida_ptrs[i] = kzalloc(sizeof(**ida_ptrs), GFP_KERNEL);
if (!ida_ptrs[i]) {
while (i--)
kfree(ida_ptrs[i]);
kfree(ida_ptrs);
return -ENOMEM;
}
ida_init(ida_ptrs[i]);
}
/* If old elements have been reused, don't wipe them. */
if (tocopy_count)
kfree(adev->mod_idas);
else
avs_module_ida_destroy(adev);
adev->mod_idas = ida_ptrs;
return 0;
}
int avs_module_info_init(struct avs_dev *adev, bool purge)
{
struct avs_mods_info *info;
int ret;
ret = avs_ipc_get_modules_info(adev, &info);
if (ret)
return AVS_IPC_RET(ret);
mutex_lock(&adev->modres_mutex);
ret = avs_module_ida_alloc(adev, info, purge);
if (ret < 0) {
dev_err(adev->dev, "initialize module idas failed: %d\n", ret);
goto exit;
}
/* Refresh current information with newly received table. */
kfree(adev->mods_info);
adev->mods_info = info;
exit:
mutex_unlock(&adev->modres_mutex);
return ret;
}
void avs_module_info_free(struct avs_dev *adev)
{
mutex_lock(&adev->modres_mutex);
avs_module_ida_destroy(adev);
kfree(adev->mods_info);
adev->mods_info = NULL;
mutex_unlock(&adev->modres_mutex);
}
int avs_module_id_alloc(struct avs_dev *adev, u16 module_id)
{
int ret, idx, max_id;
mutex_lock(&adev->modres_mutex);
idx = avs_module_id_entry_index(adev, module_id);
if (idx == -ENOENT) {
dev_err(adev->dev, "invalid module id: %d", module_id);
ret = -EINVAL;
goto exit;
}
max_id = adev->mods_info->entries[idx].instance_max_count - 1;
ret = ida_alloc_max(adev->mod_idas[idx], max_id, GFP_KERNEL);
exit:
mutex_unlock(&adev->modres_mutex);
return ret;
}
void avs_module_id_free(struct avs_dev *adev, u16 module_id, u8 instance_id)
{
int idx;
mutex_lock(&adev->modres_mutex);
idx = avs_module_id_entry_index(adev, module_id);
if (idx == -ENOENT) {
dev_err(adev->dev, "invalid module id: %d", module_id);
goto exit;
}
ida_free(adev->mod_idas[idx], instance_id);
exit:
mutex_unlock(&adev->modres_mutex);
}
/*
* Once the driver loads the FW it should keep it in memory, so we are not
* affected by FW removal from the filesystem or, even worse, by loading a
* different FW at runtime suspend/resume.
*/
int avs_request_firmware(struct avs_dev *adev, const struct firmware **fw_p, const char *name)
{
struct avs_fw_entry *entry;
int ret;
/* first check the list to see if it is already loaded */
list_for_each_entry(entry, &adev->fw_list, node) {
if (!strcmp(name, entry->name)) {
*fw_p = entry->fw;
return 0;
}
}
/* FW is not loaded, let's load it now and add it to the list */
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->name = kstrdup(name, GFP_KERNEL);
if (!entry->name) {
kfree(entry);
return -ENOMEM;
}
ret = request_firmware(&entry->fw, name, adev->dev);
if (ret < 0) {
kfree(entry->name);
kfree(entry);
return ret;
}
*fw_p = entry->fw;
list_add_tail(&entry->node, &adev->fw_list);
return 0;
}
/*
* Release single FW entry, used to handle errors in functions calling
* avs_request_firmware()
*/
void avs_release_last_firmware(struct avs_dev *adev)
{
struct avs_fw_entry *entry;
entry = list_last_entry(&adev->fw_list, typeof(*entry), node);
list_del(&entry->node);
release_firmware(entry->fw);
kfree(entry->name);
kfree(entry);
}
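/*
 * Typical caller pattern (sketch; "filename" and do_something_with() are
 * placeholders): request the image, hand it over to a consumer and drop only
 * the newly cached entry on failure.
 *
 *	ret = avs_request_firmware(adev, &fw, filename);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = do_something_with(fw);
 *	if (ret < 0)
 *		avs_release_last_firmware(adev);
 *	return ret;
 */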
/*
* Release all FW entries, used on driver removal
*/
void avs_release_firmwares(struct avs_dev *adev)
{
struct avs_fw_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &adev->fw_list, node) {
list_del(&entry->node);
release_firmware(entry->fw);
kfree(entry->name);
kfree(entry);
}
}
@@ -2465,6 +2465,7 @@ struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
dev_dbg(dev, "ASoC: Registered DAI '%s'\n", dai->name);
return dai;
}
EXPORT_SYMBOL_GPL(snd_soc_register_dai);
/**
* snd_soc_unregister_dais - Unregister DAIs from the ASoC core
......
@@ -2484,6 +2484,12 @@ static void dapm_free_path(struct snd_soc_dapm_path *path)
kfree(path);
}
/**
* snd_soc_dapm_free_widget - Free specified widget
* @w: widget to free
*
* Removes widget from all paths and frees memory occupied by it.
*/
void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
{
struct snd_soc_dapm_path *p, *next_p;
@@ -2506,6 +2512,7 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
kfree_const(w->sname);
kfree(w);
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_free_widget);
void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm)
{
@@ -4208,6 +4215,13 @@ snd_soc_dapm_new_dai(struct snd_soc_card *card,
return ERR_PTR(ret);
}
/**
* snd_soc_dapm_new_dai_widgets - Create new DAPM widgets
* @dapm: DAPM context
* @dai: parent DAI
*
* Returns 0 on success, error code otherwise.
*/
int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
struct snd_soc_dai *dai)
{
@@ -4253,6 +4267,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_new_dai_widgets);
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
{
......