Commit bc366901 authored by Lorenzo Bianconi, committed by Felix Fietkau

mt76x2: move mt76x2 mcu shared code in mt76x2_mcu_common.c

Move the shared mt76x2 {pcie/usb} mcu code into a common file
and remove the duplicated code.
Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 36fd09dd
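
In short, each duplicated mt76x2u_mcu_*/mt76x2_mcu_* helper pair collapses into a single mt76x2_mcu_* implementation in the new mt76x2_mcu_common.c, which is built into mt76x2-common and exported so that both the PCI (mt76x2e) and USB (mt76x2u) modules can use it; the USB call sites are then switched over. A minimal sketch of the resulting pattern, abbreviated from the diff below (the message-building body is elided here):

    /* mt76x2_mcu_common.c: single shared implementation, part of mt76x2-common */
    int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
                           u8 channel)
    {
            /* ... build and send the CMD_LOAD_CR MCU message (full body in the diff below) ... */
    }
    EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr);

    /* USB init path (formerly calling mt76x2u_mcu_load_cr()) now uses the shared helper */
    err = mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
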
@@ -22,7 +22,7 @@ mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
 mt76x2-common-y := \
         mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \
         mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \
-        mt76x2_debugfs.o
+        mt76x2_debugfs.o mt76x2_mcu_common.o
 
 mt76x2e-y := \
         mt76x2_pci.o mt76x2_dma.o \

@@ -165,66 +165,6 @@ mt76pci_load_firmware(struct mt76x2_dev *dev)
         return -ENOENT;
 }
 
-int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
-                       u8 channel)
-{
-        struct sk_buff *skb;
-        struct {
-                u8 cr_mode;
-                u8 temp;
-                u8 ch;
-                u8 _pad0;
-                __le32 cfg;
-        } __packed __aligned(4) msg = {
-                .cr_mode = type,
-                .temp = temp_level,
-                .ch = channel,
-        };
-        u32 val;
-        val = BIT(31);
-        val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
-        val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
-        msg.cfg = cpu_to_le32(val);
-        /* first set the channel without the extension channel info */
-        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-        return mt76_mcu_send_msg(dev, skb, CMD_LOAD_CR, true);
-}
-
-int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
-                           u8 bw_index, bool scan)
-{
-        struct sk_buff *skb;
-        struct {
-                u8 idx;
-                u8 scan;
-                u8 bw;
-                u8 _pad0;
-                __le16 chainmask;
-                u8 ext_chan;
-                u8 _pad1;
-        } __packed __aligned(4) msg = {
-                .idx = channel,
-                .scan = scan,
-                .bw = bw,
-                .chainmask = cpu_to_le16(dev->chainmask),
-        };
-        /* first set the channel without the extension channel info */
-        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-        mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
-        usleep_range(5000, 10000);
-        msg.ext_chan = 0xe0 + bw_index;
-        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-        return mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
-}
-
 int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
                          u32 param)
 {
@@ -252,41 +192,6 @@ int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
         return 0;
 }
 
-int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
-                         struct mt76x2_tssi_comp *tssi_data)
-{
-        struct sk_buff *skb;
-        struct {
-                __le32 id;
-                struct mt76x2_tssi_comp data;
-        } __packed __aligned(4) msg = {
-                .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
-                .data = *tssi_data,
-        };
-        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-        return mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
-}
-
-int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
-                         bool force)
-{
-        struct sk_buff *skb;
-        struct {
-                __le32 channel;
-                __le32 gain_val;
-        } __packed __aligned(4) msg = {
-                .channel = cpu_to_le32(channel),
-                .gain_val = cpu_to_le32(gain),
-        };
-        if (force)
-                msg.channel |= cpu_to_le32(BIT(31));
-        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-        return mt76_mcu_send_msg(dev, skb, CMD_INIT_GAIN_OP, true);
-}
-
 int mt76x2_mcu_init(struct mt76x2_dev *dev)
 {
         static const struct mt76_mcu_ops mt76x2_mcu_ops = {

+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_eeprom.h"
+#include "mt76x02_dma.h"
+
+int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+                           u8 bw_index, bool scan)
+{
+        struct sk_buff *skb;
+        struct {
+                u8 idx;
+                u8 scan;
+                u8 bw;
+                u8 _pad0;
+                __le16 chainmask;
+                u8 ext_chan;
+                u8 _pad1;
+        } __packed __aligned(4) msg = {
+                .idx = channel,
+                .scan = scan,
+                .bw = bw,
+                .chainmask = cpu_to_le16(dev->chainmask),
+        };
+        /* first set the channel without the extension channel info */
+        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+        mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
+        usleep_range(5000, 10000);
+        msg.ext_chan = 0xe0 + bw_index;
+        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+        return mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel);
+
+int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+                       u8 channel)
+{
+        struct sk_buff *skb;
+        struct {
+                u8 cr_mode;
+                u8 temp;
+                u8 ch;
+                u8 _pad0;
+                __le32 cfg;
+        } __packed __aligned(4) msg = {
+                .cr_mode = type,
+                .temp = temp_level,
+                .ch = channel,
+        };
+        u32 val;
+        val = BIT(31);
+        val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+        val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+        msg.cfg = cpu_to_le32(val);
+        /* first set the channel without the extension channel info */
+        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+        return mt76_mcu_send_msg(dev, skb, CMD_LOAD_CR, true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr);
+
+int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+                         bool force)
+{
+        struct sk_buff *skb;
+        struct {
+                __le32 channel;
+                __le32 gain_val;
+        } __packed __aligned(4) msg = {
+                .channel = cpu_to_le32(channel),
+                .gain_val = cpu_to_le32(gain),
+        };
+        if (force)
+                msg.channel |= cpu_to_le32(BIT(31));
+        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+        return mt76_mcu_send_msg(dev, skb, CMD_INIT_GAIN_OP, true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_init_gain);
+
+int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
+                         struct mt76x2_tssi_comp *tssi_data)
+{
+        struct sk_buff *skb;
+        struct {
+                __le32 id;
+                struct mt76x2_tssi_comp data;
+        } __packed __aligned(4) msg = {
+                .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+                .data = *tssi_data,
+        };
+        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+        return mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_tssi_comp);

@@ -50,18 +50,10 @@ void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev);
 void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev);
 
 void mt76x2u_mcu_complete_urb(struct urb *urb);
-int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
-                            u8 bw_index, bool scan);
 int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
                           u32 val);
-int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
-                          struct mt76x2_tssi_comp *tssi_data);
-int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
-                          bool force);
 int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
                                 bool ext, int rssi, u32 false_cca);
-int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type,
-                        u8 temp_level, u8 channel);
 int mt76x2u_mcu_init(struct mt76x2_dev *dev);
 int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev);

@@ -240,7 +240,7 @@ int mt76x2u_init_hardware(struct mt76x2_dev *dev)
         mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
         mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
 
-        err = mt76x2u_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+        err = mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
         if (err < 0)
                 return err;

@@ -29,65 +29,6 @@
 #define MT76U_MCU_DLM_OFFSET		0x110000
 #define MT76U_MCU_ROM_PATCH_OFFSET	0x90000
 
-int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
-                        u8 channel)
-{
-        struct {
-                u8 cr_mode;
-                u8 temp;
-                u8 ch;
-                u8 _pad0;
-                __le32 cfg;
-        } __packed __aligned(4) msg = {
-                .cr_mode = type,
-                .temp = temp_level,
-                .ch = channel,
-        };
-        struct sk_buff *skb;
-        u32 val;
-        val = BIT(31);
-        val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
-        val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
-        msg.cfg = cpu_to_le32(val);
-        /* first set the channel without the extension channel info */
-        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-        return mt76_mcu_send_msg(dev, skb, CMD_LOAD_CR, true);
-}
-
-int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
-                            u8 bw_index, bool scan)
-{
-        struct {
-                u8 idx;
-                u8 scan;
-                u8 bw;
-                u8 _pad0;
-                __le16 chainmask;
-                u8 ext_chan;
-                u8 _pad1;
-        } __packed __aligned(4) msg = {
-                .idx = channel,
-                .scan = scan,
-                .bw = bw,
-                .chainmask = cpu_to_le16(dev->chainmask),
-        };
-        struct sk_buff *skb;
-        /* first set the channel without the extension channel info */
-        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-        mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
-        usleep_range(5000, 10000);
-        msg.ext_chan = 0xe0 + bw_index;
-        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-        return mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
-}
-
 int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
                           u32 val)
 {
@@ -104,25 +45,6 @@ int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
         return mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
 }
 
-int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
-                          bool force)
-{
-        struct {
-                __le32 channel;
-                __le32 gain_val;
-        } __packed __aligned(4) msg = {
-                .channel = cpu_to_le32(channel),
-                .gain_val = cpu_to_le32(gain),
-        };
-        struct sk_buff *skb;
-        if (force)
-                msg.channel |= cpu_to_le32(BIT(31));
-        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-        return mt76_mcu_send_msg(dev, skb, CMD_INIT_GAIN_OP, true);
-}
-
 int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
                                 bool ext, int rssi, u32 false_cca)
 {
@@ -147,22 +69,6 @@ int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
         return mt76_mcu_send_msg(dev, skb, CMD_DYNC_VGA_OP, true);
 }
 
-int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
-                          struct mt76x2_tssi_comp *tssi_data)
-{
-        struct {
-                __le32 id;
-                struct mt76x2_tssi_comp data;
-        } __packed __aligned(4) msg = {
-                .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
-                .data = *tssi_data,
-        };
-        struct sk_buff *skb;
-        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
-        return mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
-}
-
 static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev)
 {
         mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,

@@ -84,7 +84,7 @@ mt76x2u_phy_tssi_compensate(struct mt76x2_dev *dev)
         if (!dev->cal.tssi_comp_pending) {
                 /* TSSI trigger */
                 t.cal_mode = BIT(0);
-                mt76x2u_mcu_tssi_comp(dev, &t);
+                mt76x2_mcu_tssi_comp(dev, &t);
                 dev->cal.tssi_comp_pending = true;
         } else {
                 if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
@@ -101,7 +101,7 @@ mt76x2u_phy_tssi_compensate(struct mt76x2_dev *dev)
                 t.offset0 = txp.chain[0].tssi_offset;
                 t.slope1 = txp.chain[1].tssi_slope;
                 t.offset1 = txp.chain[1].tssi_offset;
-                mt76x2u_mcu_tssi_comp(dev, &t);
+                mt76x2_mcu_tssi_comp(dev, &t);
 
                 if (t.pa_mode || dev->cal.dpd_cal_done)
                         return;
@@ -239,11 +239,11 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
                                MT_EXT_CCA_CFG_CCA_MASK),
                  ext_cca_chan[ch_group_index]);
 
-        ret = mt76x2u_mcu_set_channel(dev, channel, bw, bw_index, scan);
+        ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
         if (ret)
                 return ret;
 
-        mt76x2u_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+        mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
 
         /* Enable LDPC Rx */
         if (mt76xx_rev(dev) >= MT76XX_REV_E3)