Commit a996b9c6 authored by Linus Torvalds

Merge tag 'spi-v5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi

Pull spi updates from Mark Brown:
 "There's quite a lot of changes for SPI in this release but none in the
  core, they're all mostly small driver updates and additions. Some of
  the more notable changes include:

   - A huge set of cleanups, optimizations and improvements for the
     DesignWare driver from Serge Semin finishing up the work started
     last release.

   - Conversion of the Zynq gqspi driver to spi-mem.

   - Support for Baikal T1, Broadcom BCMSTB 7445, and Renesas R8A7742"

* tag 'spi-v5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (137 commits)
  spi: cadence: Add SPI transfer delays
  spi: dw: Add Baikal-T1 SPI Controller bindings
  spi: dw: Add Baikal-T1 SPI Controller glue driver
  spi: dw: Add poll-based SPI transfers support
  spi: dw: Introduce max mem-ops SPI bus frequency setting
  spi: dw: Add memory operations support
  spi: dw: Add generic DW SSI status-check method
  spi: dw: Move num-of retries parameter to the header file
  spi: dw: Explicitly de-assert CS on SPI transfer completion
  spi: dw: De-assert chip-select on reset
  spi: dw: Discard chip enabling on DMA setup error
  spi: dw: Unmask IRQs after enabling the chip
  spi: dw: Perform IRQ setup in a dedicated function
  spi: dw: Refactor IRQ-based SPI transfer procedure
  spi: dw: Refactor data IO procedure
  spi: dw: Add DW SPI controller config structure
  spi: dw: Update Rx sample delay in the config function
  spi: dw: Simplify the SPI bus speed config procedure
  spi: dw: Update SPI bus speed in a config function
  spi: dw: Detach SPI device specific CR0 config method
  ...
parents 1724e02e 98873118
...@@ -19,6 +19,7 @@ properties:
  compatible:
    enum:
      - ibm,fsi2spi
      - ibm,fsi2spi-restricted

  reg:
    items:
......
...@@ -32,6 +32,8 @@ Required properties:
        BRCMSTB SoCs
    "brcm,spi-bcm7435-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
        BRCMSTB SoCs
    "brcm,spi-bcm7445-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
        BRCMSTB SoCs
    "brcm,spi-bcm7216-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
        BRCMSTB SoCs
    "brcm,spi-bcm7278-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
......
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/spi/mediatek,spi-mtk-nor.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Serial NOR flash controller for MediaTek ARM SoCs
maintainers:
- Bayi Cheng <bayi.cheng@mediatek.com>
- Chuanhong Guo <gch981213@gmail.com>
description: |
  This SPI controller supports single, dual, or quad mode transfers for
  SPI NOR flash. There should be only one SPI slave device following the
  generic SPI bindings. Using this controller for devices other than SPI
  NOR flash is not recommended due to its limited transfer capability.
allOf:
- $ref: /spi/spi-controller.yaml#
properties:
compatible:
oneOf:
- items:
- enum:
- mediatek,mt2701-nor
- mediatek,mt2712-nor
- mediatek,mt7622-nor
- mediatek,mt7623-nor
- mediatek,mt7629-nor
- mediatek,mt8192-nor
- enum:
- mediatek,mt8173-nor
- items:
- const: mediatek,mt8173-nor
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
items:
- description: clock used for spi bus
- description: clock used for controller
clock-names:
items:
- const: spi
- const: sf
required:
- compatible
- reg
- interrupts
- clocks
- clock-names
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/mt8173-clk.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
nor_flash: spi@1100d000 {
compatible = "mediatek,mt8173-nor";
reg = <0 0x1100d000 0 0xe0>;
interrupts = <&spi_flash_irq>;
clocks = <&pericfg CLK_PERI_SPI>, <&topckgen CLK_TOP_SPINFI_IFR_SEL>;
clock-names = "spi", "sf";
#address-cells = <1>;
#size-cells = <0>;
flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
};
};
};
...@@ -25,6 +25,7 @@ properties:
      - items:
          - enum:
              - renesas,qspi-r8a7742 # RZ/G1H
              - renesas,qspi-r8a7743 # RZ/G1M
              - renesas,qspi-r8a7744 # RZ/G1N
              - renesas,qspi-r8a7745 # RZ/G1E
......
...@@ -41,6 +41,7 @@ properties:
          - renesas,msiof-r8a774e1 # RZ/G2H
          - renesas,msiof-r8a7795  # R-Car H3
          - renesas,msiof-r8a7796  # R-Car M3-W
          - renesas,msiof-r8a77961 # R-Car M3-W+
          - renesas,msiof-r8a77965 # R-Car M3-N
          - renesas,msiof-r8a77970 # R-Car V3M
          - renesas,msiof-r8a77980 # R-Car V3H
......
...@@ -22,6 +22,21 @@ allOf:
      properties:
        reg:
          minItems: 2
- if:
properties:
compatible:
contains:
enum:
- baikal,bt1-sys-ssi
then:
properties:
mux-controls:
maxItems: 1
required:
- mux-controls
else:
required:
- interrupts
properties:
  compatible:
...@@ -36,6 +51,8 @@ properties:
          - mscc,ocelot-spi
          - mscc,jaguar2-spi
        - const: snps,dw-apb-ssi
- description: Microchip Sparx5 SoC SPI Controller
const: microchip,sparx5-spi
      - description: Amazon Alpine SPI Controller
        const: amazon,alpine-dw-apb-ssi
      - description: Renesas RZ/N1 SPI Controller
...@@ -44,12 +61,16 @@ properties:
          - const: snps,dw-apb-ssi
      - description: Intel Keem Bay SPI Controller
        const: intel,keembay-ssi
- description: Baikal-T1 SPI Controller
const: baikal,bt1-ssi
- description: Baikal-T1 System Boot SPI Controller
const: baikal,bt1-sys-ssi
  reg:
    minItems: 1
    items:
      - description: DW APB SSI controller memory mapped registers
      - description: SPI MST region map
      - description: SPI MST region map or directly mapped SPI ROM

  interrupts:
    maxItems: 1

...@@ -93,6 +114,12 @@ properties:
      - const: tx
      - const: rx
rx-sample-delay-ns:
default: 0
description: Default value of the rx-sample-delay-ns property.
This value will be used if the property is not explicitly defined
for a SPI slave device. See below.
patternProperties:
  "^.*@[0-9a-f]+$":
    type: object
...@@ -107,6 +134,13 @@ patternProperties:
        spi-tx-bus-width:
          const: 1
rx-sample-delay-ns:
description: SPI Rx sample delay offset, unit is nanoseconds.
The delay from the default sample time before the actual
sample of the rxd input signal occurs. The "rx_sample_delay"
is an optional feature of the designware controller, and the
upper limit is also subject to controller configuration.
    unevaluatedProperties: false

required:
...@@ -114,7 +148,6 @@ required:
  - reg
  - "#address-cells"
  - "#size-cells"
  - interrupts
  - clocks

examples:
...@@ -129,5 +162,22 @@ examples:
        num-cs = <2>;
        cs-gpios = <&gpio0 13 0>,
                   <&gpio0 14 0>;
rx-sample-delay-ns = <3>;
spi-flash@1 {
compatible = "spi-nand";
reg = <1>;
rx-sample-delay-ns = <7>;
};
};
- |
spi@1f040100 {
compatible = "baikal,bt1-sys-ssi";
reg = <0x1f040100 0x900>,
<0x1c000000 0x1000000>;
#address-cells = <1>;
#size-cells = <0>;
mux-controls = <&boot_mux>;
clocks = <&ccu_sys>;
clock-names = "ssi_clk";
    };
...
* Serial NOR flash controller for MediaTek ARM SoCs
Required properties:
- compatible: For mt8173, compatible should be "mediatek,mt8173-nor",
and it's the fallback compatible for other Soc.
For every other SoC, should contain both the SoC-specific compatible
string and "mediatek,mt8173-nor".
The possible values are:
"mediatek,mt2701-nor", "mediatek,mt8173-nor"
"mediatek,mt2712-nor", "mediatek,mt8173-nor"
"mediatek,mt7622-nor", "mediatek,mt8173-nor"
"mediatek,mt7623-nor", "mediatek,mt8173-nor"
"mediatek,mt7629-nor", "mediatek,mt8173-nor"
"mediatek,mt8173-nor"
- reg: physical base address and length of the controller's register
- interrupts: Interrupt number used by the controller.
- clocks: the phandle of the clocks needed by the nor controller
- clock-names: the names of the clocks
  the clocks should be named "spi" and "sf". "spi" is used for the SPI bus,
  and "sf" is used for the controller; these are the clocks which the
  hardware needs to enable the NOR flash and the NOR flash controller.
See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
- #address-cells: should be <1>
- #size-cells: should be <0>
There should be only one spi slave device following generic spi bindings.
It's not recommended to use this controller for devices other than SPI NOR
flash due to limited transfer capability of this controller.
Example:
nor_flash: spi@1100d000 {
compatible = "mediatek,mt8173-nor";
reg = <0 0x1100d000 0 0xe0>;
interrupts = <&spi_flash_irq>;
clocks = <&pericfg CLK_PERI_SPI>,
<&topckgen CLK_TOP_SPINFI_IFR_SEL>;
clock-names = "spi", "sf";
#address-cells = <1>;
#size-cells = <0>;
flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
};
};
...@@ -235,6 +235,7 @@ config SPI_DAVINCI

config SPI_DESIGNWARE
	tristate "DesignWare SPI controller core support"
	imply SPI_MEM
	help
	  general driver for SPI controller core from DesignWare

...@@ -251,6 +252,34 @@ config SPI_DW_MMIO
	tristate "Memory-mapped io interface driver for DW SPI core"
	depends on HAS_IOMEM
config SPI_DW_BT1
tristate "Baikal-T1 SPI driver for DW SPI core"
depends on MIPS_BAIKAL_T1 || COMPILE_TEST
help
	  Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI
	  controllers. Two of them are pretty much normal: with IRQ, DMA,
	  FIFOs of 64 words depth, 4x CSs, but the third one, being part
	  of the Baikal-T1 System Boot Controller, has very limited
	  resources: no IRQ, no DMA, only a single native chip-select and
	  a Tx/Rx FIFO just 8 words deep. The latter is normally connected
	  to an external SPI NOR flash of 128 Mb (which in general can be
	  bigger).
config SPI_DW_BT1_DIRMAP
bool "Directly mapped Baikal-T1 Boot SPI flash support"
depends on SPI_DW_BT1
select MULTIPLEXER
select MUX_MMIO
help
Directly mapped SPI flash memory is an interface specific to the
Baikal-T1 System Boot Controller. It is a 16MB MMIO region, which
can be used to access a peripheral memory device just by
reading/writing data from/to it. Note that the system APB bus
will stall during each IO from/to the dirmap region until the
operation is finished. So try not to use it concurrently with
time-critical tasks (like the SPI memory operations implemented
in this driver).
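For reference, here is a minimal, hedged sketch of how an spi-mem client consumes a direct mapping like the one this option provides. The opcode, offsets, sizes and helper name are illustrative assumptions, not code from this series; if the controller's dirmap_create hook rejects the mapping, spi-mem transparently falls back to regular read operations.

#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>

/* Hypothetical consumer: map 16MB of flash read-only and read 256 bytes
 * from offset 0x10000 through the mapping.
 */
static int example_dirmap_read(struct spi_mem *mem, void *buf)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = SZ_16M,
	};
	struct spi_mem_dirmap_desc *desc;
	ssize_t ret;

	desc = devm_spi_mem_dirmap_create(&mem->spi->dev, mem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	ret = spi_mem_dirmap_read(desc, 0x10000, 256, buf);
	return ret < 0 ? ret : 0;
}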
endif

config SPI_DLN2
...@@ -637,7 +666,7 @@ config SPI_QCOM_QSPI

config SPI_QUP
	tristate "Qualcomm SPI controller with QUP interface"
	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
	depends on ARCH_QCOM || COMPILE_TEST
	help
	  Qualcomm Universal Peripheral (QUP) core is an AHB slave that
	  provides a common data path (an output FIFO and an input FIFO)
......
...@@ -39,6 +39,7 @@ obj-$(CONFIG_SPI_DLN2) += spi-dln2.o
obj-$(CONFIG_SPI_DESIGNWARE)	+= spi-dw.o
spi-dw-y			:= spi-dw-core.o
spi-dw-$(CONFIG_SPI_DW_DMA)	+= spi-dw-dma.o
obj-$(CONFIG_SPI_DW_BT1)	+= spi-dw-bt1.o
obj-$(CONFIG_SPI_DW_MMIO)	+= spi-dw-mmio.o
obj-$(CONFIG_SPI_DW_PCI)	+= spi-dw-pci.o
obj-$(CONFIG_SPI_EFM32)		+= spi-efm32.o
......
...@@ -848,7 +848,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
	platform_set_drvdata(pdev, master);

	spi = spi_master_get_devdata(master);
	memset(spi, 0, sizeof(struct a3700_spi));
	spi->master = master;
......
...@@ -513,9 +513,8 @@ static int atmel_spi_configure_dma(struct spi_master *master,

	master->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		err = PTR_ERR(master->dma_tx);
		if (err != -EPROBE_DEFER)
			dev_err(dev, "No TX DMA channel, DMA is disabled\n");
		err = dev_err_probe(dev, PTR_ERR(master->dma_tx),
				    "No TX DMA channel, DMA is disabled\n");
		goto error_clear;
	}
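The change above is one of several conversions in this pull to the dev_err_probe() helper. A minimal sketch of the idiom follows; the clock lookup, helper name and message are illustrative only, not taken from this series.

#include <linux/clk.h>
#include <linux/device.h>

/* Hypothetical helper: on failure, dev_err_probe() logs the message and
 * returns the error code; for -EPROBE_DEFER it only logs at debug level,
 * so the explicit "if (err != -EPROBE_DEFER)" check becomes unnecessary.
 */
static int example_get_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, NULL);
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk), "could not get clk\n");

	return 0;
}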
...@@ -859,6 +858,7 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
	csr = spi_readl(as, CSR0 + 4 * chip_select);
	csr = SPI_BFINS(SCBR, scbr, csr);
	spi_writel(as, CSR0 + 4 * chip_select, csr);
	xfer->effective_speed_hz = bus_hz / scbr;

	return 0;
}
......
...@@ -1282,16 +1282,9 @@ static const struct bcm_qspi_data bcm_qspi_spcr3_data = {

static const struct of_device_id bcm_qspi_of_match[] = {
	{
		.compatible = "brcm,spi-bcm7425-qspi",
		.data = &bcm_qspi_no_rev_data,
	},
	{
		.compatible = "brcm,spi-bcm7429-qspi",
		.data = &bcm_qspi_no_rev_data,
	},
	{
		.compatible = "brcm,spi-bcm7435-qspi",
		.data = &bcm_qspi_no_rev_data,
		.compatible = "brcm,spi-bcm7445-qspi",
		.data = &bcm_qspi_rev_data,
	},
	{
		.compatible = "brcm,spi-bcm-qspi",
......
...@@ -1319,11 +1319,8 @@ static int bcm2835_spi_probe(struct platform_device *pdev)

	bs->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(bs->clk)) {
		err = PTR_ERR(bs->clk);
		if (err == -EPROBE_DEFER)
			dev_dbg(&pdev->dev, "could not get clk: %d\n", err);
		else
			dev_err(&pdev->dev, "could not get clk: %d\n", err);
		err = dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
				    "could not get clk\n");
		goto out_controller_put;
	}
......
...@@ -1119,11 +1119,8 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
	cqspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(cqspi->rx_chan)) {
		int ret = PTR_ERR(cqspi->rx_chan);

		if (ret != -EPROBE_DEFER)
			dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
		cqspi->rx_chan = NULL;
		return ret;
		return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
	}
	init_completion(&cqspi->rx_dma_complete);
......
...@@ -418,8 +418,8 @@ static int cdns_transfer_one(struct spi_master *master,
	xspi->rx_bytes = transfer->len;

	cdns_spi_setup_transfer(spi, transfer);
	cdns_spi_fill_tx_fifo(xspi);
	spi_transfer_delay_exec(transfer);

	cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
	return transfer->len;
......
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
//
// Authors:
// Ramil Zaripov <Ramil.Zaripov@baikalelectronics.ru>
// Serge Semin <Sergey.Semin@baikalelectronics.ru>
//
// Baikal-T1 DW APB SPI and System Boot SPI driver
//
#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spi/spi-mem.h>
#include <linux/spi/spi.h>
#include "spi-dw.h"
#define BT1_BOOT_DIRMAP 0
#define BT1_BOOT_REGS 1
struct dw_spi_bt1 {
struct dw_spi dws;
struct clk *clk;
struct mux_control *mux;
#ifdef CONFIG_SPI_DW_BT1_DIRMAP
void __iomem *map;
resource_size_t map_len;
#endif
};
#define to_dw_spi_bt1(_ctlr) \
container_of(spi_controller_get_devdata(_ctlr), struct dw_spi_bt1, dws)
typedef int (*dw_spi_bt1_init_cb)(struct platform_device *pdev,
struct dw_spi_bt1 *dwsbt1);
#ifdef CONFIG_SPI_DW_BT1_DIRMAP
static int dw_spi_bt1_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
struct dw_spi_bt1 *dwsbt1 = to_dw_spi_bt1(desc->mem->spi->controller);
if (!dwsbt1->map ||
!dwsbt1->dws.mem_ops.supports_op(desc->mem, &desc->info.op_tmpl))
return -EOPNOTSUPP;
/*
* Make sure the requested region doesn't go out of the physically
* mapped flash memory bounds and the operation is read-only.
*/
if (desc->info.offset + desc->info.length > dwsbt1->map_len ||
desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
return -EOPNOTSUPP;
return 0;
}
/*
* Directly mapped SPI memory region is only accessible in the dword chunks.
* That's why we have to create a dedicated read-method to copy data from there
* to the passed buffer.
*/
static void dw_spi_bt1_dirmap_copy_from_map(void *to, void __iomem *from, size_t len)
{
size_t shift, chunk;
u32 data;
/*
* We split the copying up into the next three stages: unaligned head,
* aligned body, unaligned tail.
*/
shift = (size_t)from & 0x3;
if (shift) {
chunk = min_t(size_t, 4 - shift, len);
data = readl_relaxed(from - shift);
memcpy(to, &data + shift, chunk);
from += chunk;
to += chunk;
len -= chunk;
}
while (len >= 4) {
data = readl_relaxed(from);
memcpy(to, &data, 4);
from += 4;
to += 4;
len -= 4;
}
if (len) {
data = readl_relaxed(from);
memcpy(to, &data, len);
}
}
static ssize_t dw_spi_bt1_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
struct dw_spi_bt1 *dwsbt1 = to_dw_spi_bt1(desc->mem->spi->controller);
struct dw_spi *dws = &dwsbt1->dws;
struct spi_mem *mem = desc->mem;
struct dw_spi_cfg cfg;
int ret;
/*
* Make sure the requested operation length is valid. Truncate the
* length if it's greater than the length of the MMIO region.
*/
if (offs >= dwsbt1->map_len || !len)
return 0;
len = min_t(size_t, len, dwsbt1->map_len - offs);
/* Collect the controller configuration required by the operation */
cfg.tmode = SPI_TMOD_EPROMREAD;
cfg.dfs = 8;
cfg.ndf = 4;
cfg.freq = mem->spi->max_speed_hz;
/* Make sure the corresponding CS is de-asserted on transmission */
dw_spi_set_cs(mem->spi, false);
spi_enable_chip(dws, 0);
dw_spi_update_config(dws, mem->spi, &cfg);
spi_umask_intr(dws, SPI_INT_RXFI);
spi_enable_chip(dws, 1);
/*
* Enable the transparent mode of the System Boot Controller.
* The SPI core IO should have been locked before calling this method
 * so no one would be touching the controller's registers during the
* dirmap operation.
*/
ret = mux_control_select(dwsbt1->mux, BT1_BOOT_DIRMAP);
if (ret)
return ret;
dw_spi_bt1_dirmap_copy_from_map(buf, dwsbt1->map + offs, len);
mux_control_deselect(dwsbt1->mux);
dw_spi_set_cs(mem->spi, true);
ret = dw_spi_check_status(dws, true);
return ret ?: len;
}
#endif /* CONFIG_SPI_DW_BT1_DIRMAP */
static int dw_spi_bt1_std_init(struct platform_device *pdev,
struct dw_spi_bt1 *dwsbt1)
{
struct dw_spi *dws = &dwsbt1->dws;
dws->irq = platform_get_irq(pdev, 0);
if (dws->irq < 0)
return dws->irq;
dws->num_cs = 4;
/*
* Baikal-T1 Normal SPI Controllers don't always keep up with full SPI
* bus speed especially when it comes to the concurrent access to the
* APB bus resources. Thus we have no choice but to set a constraint on
* the SPI bus frequency for the memory operations which require to
* read/write data as fast as possible.
*/
dws->max_mem_freq = 20000000U;
dw_spi_dma_setup_generic(dws);
return 0;
}
static int dw_spi_bt1_sys_init(struct platform_device *pdev,
struct dw_spi_bt1 *dwsbt1)
{
struct resource *mem __maybe_unused;
struct dw_spi *dws = &dwsbt1->dws;
/*
* Baikal-T1 System Boot Controller is equipped with a mux, which
* switches between the directly mapped SPI flash access mode and
* IO access to the DW APB SSI registers. Note the mux controller
* must be setup to preserve the registers being accessible by default
* (on idle-state).
*/
dwsbt1->mux = devm_mux_control_get(&pdev->dev, NULL);
if (IS_ERR(dwsbt1->mux))
return PTR_ERR(dwsbt1->mux);
/*
* Directly mapped SPI flash memory is a 16MB MMIO region, which can be
* used to access a peripheral memory device just by reading/writing
* data from/to it. Note the system APB bus will stall during each IO
* from/to the dirmap region until the operation is finished. So don't
* use it concurrently with time-critical tasks (like the SPI memory
* operations implemented in the DW APB SSI driver).
*/
#ifdef CONFIG_SPI_DW_BT1_DIRMAP
mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (mem) {
dwsbt1->map = devm_ioremap_resource(&pdev->dev, mem);
if (!IS_ERR(dwsbt1->map)) {
dwsbt1->map_len = (mem->end - mem->start + 1);
dws->mem_ops.dirmap_create = dw_spi_bt1_dirmap_create;
dws->mem_ops.dirmap_read = dw_spi_bt1_dirmap_read;
} else {
dwsbt1->map = NULL;
}
}
#endif /* CONFIG_SPI_DW_BT1_DIRMAP */
/*
* There is no IRQ, no DMA and just one CS available on the System Boot
* SPI controller.
*/
dws->irq = IRQ_NOTCONNECTED;
dws->num_cs = 1;
/*
* Baikal-T1 System Boot SPI Controller doesn't keep up with the full
 * SPI bus speed due to the relatively slow APB bus and races for its
 * resources from different CPUs. The situation is worsened by the small
 * FIFO depth (just 8 words). It works better in a single-CPU mode
* though, but still tends to be not fast enough at low CPU
* frequencies.
*/
if (num_possible_cpus() > 1)
dws->max_mem_freq = 10000000U;
else
dws->max_mem_freq = 20000000U;
return 0;
}
static int dw_spi_bt1_probe(struct platform_device *pdev)
{
dw_spi_bt1_init_cb init_func;
struct dw_spi_bt1 *dwsbt1;
struct resource *mem;
struct dw_spi *dws;
int ret;
dwsbt1 = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_bt1), GFP_KERNEL);
if (!dwsbt1)
return -ENOMEM;
dws = &dwsbt1->dws;
dws->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(dws->regs))
return PTR_ERR(dws->regs);
dws->paddr = mem->start;
dwsbt1->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dwsbt1->clk))
return PTR_ERR(dwsbt1->clk);
ret = clk_prepare_enable(dwsbt1->clk);
if (ret)
return ret;
dws->bus_num = pdev->id;
dws->reg_io_width = 4;
dws->max_freq = clk_get_rate(dwsbt1->clk);
if (!dws->max_freq)
goto err_disable_clk;
init_func = device_get_match_data(&pdev->dev);
ret = init_func(pdev, dwsbt1);
if (ret)
goto err_disable_clk;
pm_runtime_enable(&pdev->dev);
ret = dw_spi_add_host(&pdev->dev, dws);
if (ret)
goto err_disable_clk;
platform_set_drvdata(pdev, dwsbt1);
return 0;
err_disable_clk:
clk_disable_unprepare(dwsbt1->clk);
return ret;
}
static int dw_spi_bt1_remove(struct platform_device *pdev)
{
struct dw_spi_bt1 *dwsbt1 = platform_get_drvdata(pdev);
dw_spi_remove_host(&dwsbt1->dws);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(dwsbt1->clk);
return 0;
}
static const struct of_device_id dw_spi_bt1_of_match[] = {
{ .compatible = "baikal,bt1-ssi", .data = dw_spi_bt1_std_init},
{ .compatible = "baikal,bt1-sys-ssi", .data = dw_spi_bt1_sys_init},
{ }
};
MODULE_DEVICE_TABLE(of, dw_spi_bt1_of_match);
static struct platform_driver dw_spi_bt1_driver = {
.probe = dw_spi_bt1_probe,
.remove = dw_spi_bt1_remove,
.driver = {
.name = "bt1-sys-ssi",
.of_match_table = dw_spi_bt1_of_match,
},
};
module_platform_driver(dw_spi_bt1_driver);
MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
MODULE_DESCRIPTION("Baikal-T1 System Boot SPI Controller driver");
MODULE_LICENSE("GPL v2");
...@@ -45,16 +45,12 @@ struct dw_spi_mmio {
#define MSCC_SPI_MST_SW_MODE_SW_PIN_CTRL_MODE	BIT(13)
#define MSCC_SPI_MST_SW_MODE_SW_SPI_CS(x)	(x << 5)

/*
 * For Keem Bay, CTRLR0[31] is used to select controller mode.
 * 0: SSI is slave
 * 1: SSI is master
 */
#define KEEMBAY_CTRLR0_SSIC_IS_MST	BIT(31)
#define SPARX5_FORCE_ENA	0xa4
#define SPARX5_FORCE_VAL	0xa8

struct dw_spi_mscc {
	struct regmap       *syscon;
	void __iomem        *spi_mst;
	void __iomem        *spi_mst; /* Not sparx5 */
};

/*
...@@ -114,9 +110,6 @@ static int dw_spi_mscc_init(struct platform_device *pdev,
	dwsmmio->dws.set_cs = dw_spi_mscc_set_cs;
	dwsmmio->priv = dwsmscc;

	/* Register hook to configure CTRLR0 */
	dwsmmio->dws.update_cr0 = dw_spi_update_cr0;

	return 0;
}

...@@ -134,53 +127,97 @@ static int dw_spi_mscc_jaguar2_init(struct platform_device *pdev,
				    JAGUAR2_IF_SI_OWNER_OFFSET);
}
static int dw_spi_alpine_init(struct platform_device *pdev,
			      struct dw_spi_mmio *dwsmmio)
{
	dwsmmio->dws.cs_override = 1;

	/* Register hook to configure CTRLR0 */
	dwsmmio->dws.update_cr0 = dw_spi_update_cr0;

	return 0;
}

static int dw_spi_dw_apb_init(struct platform_device *pdev,
			      struct dw_spi_mmio *dwsmmio)
{
	/* Register hook to configure CTRLR0 */
	dwsmmio->dws.update_cr0 = dw_spi_update_cr0;

	dw_spi_dma_setup_generic(&dwsmmio->dws);

	return 0;
}

static int dw_spi_dwc_ssi_init(struct platform_device *pdev,
			       struct dw_spi_mmio *dwsmmio)
{
	/* Register hook to configure CTRLR0 */
	dwsmmio->dws.update_cr0 = dw_spi_update_cr0_v1_01a;

	dw_spi_dma_setup_generic(&dwsmmio->dws);

	return 0;
}

static u32 dw_spi_update_cr0_keembay(struct spi_controller *master,
				     struct spi_device *spi,
				     struct spi_transfer *transfer)
{
	u32 cr0 = dw_spi_update_cr0_v1_01a(master, spi, transfer);

	return cr0 | KEEMBAY_CTRLR0_SSIC_IS_MST;
}

static int dw_spi_keembay_init(struct platform_device *pdev,
			       struct dw_spi_mmio *dwsmmio)
{
	/* Register hook to configure CTRLR0 */
	dwsmmio->dws.update_cr0 = dw_spi_update_cr0_keembay;

	return 0;
}

/*
 * The Designware SPI controller (referred to as master in the
 * documentation) automatically deasserts chip select when the tx fifo
 * is empty. The chip selects then needs to be driven by a CS override
 * register. enable is an active low signal.
 */
static void dw_spi_sparx5_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_master_get_devdata(spi->master);
	struct dw_spi_mmio *dwsmmio = container_of(dws, struct dw_spi_mmio, dws);
	struct dw_spi_mscc *dwsmscc = dwsmmio->priv;
	u8 cs = spi->chip_select;

	if (!enable) {
		/* CS override drive enable */
		regmap_write(dwsmscc->syscon, SPARX5_FORCE_ENA, 1);
		/* Now set CSx enabled */
		regmap_write(dwsmscc->syscon, SPARX5_FORCE_VAL, ~BIT(cs));
		/* Allow settle */
		usleep_range(1, 5);
	} else {
		/* CS value */
		regmap_write(dwsmscc->syscon, SPARX5_FORCE_VAL, ~0);
		/* Allow settle */
		usleep_range(1, 5);
		/* CS override drive disable */
		regmap_write(dwsmscc->syscon, SPARX5_FORCE_ENA, 0);
	}

	dw_spi_set_cs(spi, enable);
}

static int dw_spi_mscc_sparx5_init(struct platform_device *pdev,
				   struct dw_spi_mmio *dwsmmio)
{
	const char *syscon_name = "microchip,sparx5-cpu-syscon";
	struct device *dev = &pdev->dev;
	struct dw_spi_mscc *dwsmscc;

	if (!IS_ENABLED(CONFIG_SPI_MUX)) {
		dev_err(dev, "This driver needs CONFIG_SPI_MUX\n");
		return -EOPNOTSUPP;
	}

	dwsmscc = devm_kzalloc(dev, sizeof(*dwsmscc), GFP_KERNEL);
	if (!dwsmscc)
		return -ENOMEM;

	dwsmscc->syscon =
		syscon_regmap_lookup_by_compatible(syscon_name);
	if (IS_ERR(dwsmscc->syscon)) {
		dev_err(dev, "No syscon map %s\n", syscon_name);
		return PTR_ERR(dwsmscc->syscon);
	}

	dwsmmio->dws.set_cs = dw_spi_sparx5_set_cs;
	dwsmmio->priv = dwsmscc;

	return 0;
}

static int dw_spi_alpine_init(struct platform_device *pdev,
			      struct dw_spi_mmio *dwsmmio)
{
	dwsmmio->dws.caps = DW_SPI_CAP_CS_OVERRIDE;

	return 0;
}

static int dw_spi_dw_apb_init(struct platform_device *pdev,
			      struct dw_spi_mmio *dwsmmio)
{
	dw_spi_dma_setup_generic(&dwsmmio->dws);

	return 0;
}

static int dw_spi_dwc_ssi_init(struct platform_device *pdev,
			       struct dw_spi_mmio *dwsmmio)
{
	dwsmmio->dws.caps = DW_SPI_CAP_DWC_SSI;

	dw_spi_dma_setup_generic(&dwsmmio->dws);

	return 0;
}

static int dw_spi_keembay_init(struct platform_device *pdev,
			       struct dw_spi_mmio *dwsmmio)
{
	dwsmmio->dws.caps = DW_SPI_CAP_KEEMBAY_MST | DW_SPI_CAP_DWC_SSI;

	return 0;
}
...@@ -297,6 +334,7 @@ static const struct of_device_id dw_spi_mmio_of_match[] = {
	{ .compatible = "renesas,rzn1-spi", .data = dw_spi_dw_apb_init},
	{ .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_dwc_ssi_init},
	{ .compatible = "intel,keembay-ssi", .data = dw_spi_keembay_init},
	{ .compatible = "microchip,sparx5-spi", dw_spi_mscc_sparx5_init},
	{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);
......
...@@ -48,9 +48,6 @@ static int spi_mid_init(struct dw_spi *dws)

	iounmap(clk_reg);

	/* Register hook to configure CTRLR0 */
	dws->update_cr0 = dw_spi_update_cr0;

	dw_spi_dma_setup_mfld(dws);

	return 0;
...@@ -58,9 +55,6 @@ static int spi_mid_init(struct dw_spi *dws)

static int spi_generic_init(struct dw_spi *dws)
{
	/* Register hook to configure CTRLR0 */
	dws->update_cr0 = dw_spi_update_cr0;

	dw_spi_dma_setup_generic(dws);

	return 0;
...@@ -127,18 +121,16 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
		if (desc->setup) {
			ret = desc->setup(dws);
			if (ret)
				return ret;
				goto err_free_irq_vectors;
		}
	} else {
		pci_free_irq_vectors(pdev);
		return -ENODEV;
		ret = -ENODEV;
		goto err_free_irq_vectors;
	}

	ret = dw_spi_add_host(&pdev->dev, dws);
	if (ret) {
		pci_free_irq_vectors(pdev);
		return ret;
	}
	if (ret)
		goto err_free_irq_vectors;

	/* PCI hook and SPI hook use the same drv data */
	pci_set_drvdata(pdev, dws);
...@@ -152,6 +144,10 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	pm_runtime_allow(&pdev->dev);

	return 0;

err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

static void spi_pci_remove(struct pci_dev *pdev)
......
...@@ -2,11 +2,13 @@
#ifndef DW_SPI_HEADER_H
#define DW_SPI_HEADER_H

#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/irqreturn.h>
#include <linux/io.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi-mem.h>

/* Register offsets */
#define DW_SPI_CTRLR0			0x00
...@@ -34,6 +36,7 @@
#define DW_SPI_IDR			0x58
#define DW_SPI_VERSION			0x5c
#define DW_SPI_DR			0x60
#define DW_SPI_RX_SAMPLE_DLY		0xf0
#define DW_SPI_CS_OVERRIDE		0xf4

/* Bit fields in CTRLR0 */
...@@ -69,6 +72,16 @@
#define DWC_SSI_CTRLR0_FRF_OFFSET	6
#define DWC_SSI_CTRLR0_DFS_OFFSET	0
/*
* For Keem Bay, CTRLR0[31] is used to select controller mode.
* 0: SSI is slave
* 1: SSI is master
*/
#define DWC_SSI_CTRLR0_KEEMBAY_MST BIT(31)
/* Bit fields in CTRLR1 */
#define SPI_NDF_MASK GENMASK(15, 0)
/* Bit fields in SR, 7 bits */
#define SR_MASK				0x7f	/* cover 7 bits */
#define SR_BUSY				(1 << 0)
...@@ -91,8 +104,12 @@
#define SPI_DMA_RDMAE			(1 << 0)
#define SPI_DMA_TDMAE			(1 << 1)

/* TX RX interrupt level threshold, max can be 256 */
#define SPI_INT_THRESHOLD		32
#define SPI_WAIT_RETRIES		5
#define SPI_BUF_SIZE \
	(sizeof_field(struct spi_mem_op, cmd.opcode) + \
	 sizeof_field(struct spi_mem_op, addr.val) + 256)
#define SPI_GET_BYTE(_val, _idx) \
	((_val) >> (BITS_PER_BYTE * (_idx)) & 0xff)
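A hedged sketch of what SPI_GET_BYTE is for, loosely modelled on the memory-operation path that packs a spi_mem_op opcode and address into the driver buffer, most-significant address byte first. The helper name is made up and the code is illustrative only, not the driver's actual implementation.

/* Illustrative only: serialize cmd + address of a spi_mem_op into buf. */
static unsigned int example_fill_tx_buf(u8 *buf, const struct spi_mem_op *op)
{
	unsigned int i = 0, j;

	buf[i++] = op->cmd.opcode;

	for (j = 0; j < op->addr.nbytes; j++)
		buf[i++] = SPI_GET_BYTE(op->addr.val, op->addr.nbytes - 1 - j);

	return i;
}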
enum dw_ssi_type {
	SSI_MOTO_SPI = 0,
...@@ -100,6 +117,19 @@ enum dw_ssi_type {
	SSI_NS_MICROWIRE,
};
/* DW SPI capabilities */
#define DW_SPI_CAP_CS_OVERRIDE BIT(0)
#define DW_SPI_CAP_KEEMBAY_MST BIT(1)
#define DW_SPI_CAP_DWC_SSI BIT(2)
/* Slave spi_transfer/spi_mem_op related */
struct dw_spi_cfg {
u8 tmode;
u8 dfs;
u32 ndf;
u32 freq;
};
struct dw_spi;
struct dw_spi_dma_ops {
	int (*dma_init)(struct device *dev, struct dw_spi *dws);
...@@ -113,39 +143,43 @@ struct dw_spi_dma_ops {
struct dw_spi {
	struct spi_controller	*master;
	enum dw_ssi_type	type;

	void __iomem		*regs;
	unsigned long		paddr;
	int			irq;
	u32			fifo_len;	/* depth of the FIFO buffer */
	u32			max_mem_freq;	/* max mem-ops bus freq */
	u32			max_freq;	/* max bus freq supported */

	int			cs_override;
	u32			caps;		/* DW SPI capabilities */
	u32			reg_io_width;	/* DR I/O width in bytes */
	u16			bus_num;
	u16			num_cs;		/* supported slave numbers */
	void (*set_cs)(struct spi_device *spi, bool enable);
	u32 (*update_cr0)(struct spi_controller *master, struct spi_device *spi,
			  struct spi_transfer *transfer);

	/* Current message transfer state info */
	size_t			len;
	void			*tx;
	void			*tx_end;
	unsigned int		tx_len;
	spinlock_t		buf_lock;
	void			*rx;
	void			*rx_end;
	unsigned int		rx_len;
	u8			buf[SPI_BUF_SIZE];
	int			dma_mapped;
	u8			n_bytes;	/* current is a 1/2 bytes op */
	irqreturn_t		(*transfer_handler)(struct dw_spi *dws);
	u32			current_freq;	/* frequency in hz */
	u32			cur_rx_sample_dly;
	u32			def_rx_sample_dly_ns;

	/* Custom memory operations */
	struct spi_controller_mem_ops mem_ops;

	/* DMA info */
	struct dma_chan		*txchan;
	u32			txburst;
	struct dma_chan		*rxchan;
	u32			rxburst;
	u32			dma_sg_burst;
	unsigned long		dma_chan_busy;
	dma_addr_t		dma_addr; /* phy address of the Data register */
	const struct dw_spi_dma_ops *dma_ops;
...@@ -162,29 +196,19 @@ static inline u32 dw_readl(struct dw_spi *dws, u32 offset)
	return __raw_readl(dws->regs + offset);
}
static inline u16 dw_readw(struct dw_spi *dws, u32 offset)
{
return __raw_readw(dws->regs + offset);
}
static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
{
	__raw_writel(val, dws->regs + offset);
}
static inline void dw_writew(struct dw_spi *dws, u32 offset, u16 val)
{
__raw_writew(val, dws->regs + offset);
}
static inline u32 dw_read_io_reg(struct dw_spi *dws, u32 offset)
{
	switch (dws->reg_io_width) {
	case 2:
		return dw_readw(dws, offset);
		return readw_relaxed(dws->regs + offset);
	case 4:
	default:
		return dw_readl(dws, offset);
		return readl_relaxed(dws->regs + offset);
	}
}

...@@ -192,11 +216,11 @@ static inline void dw_write_io_reg(struct dw_spi *dws, u32 offset, u32 val)
{
	switch (dws->reg_io_width) {
	case 2:
		dw_writew(dws, offset, val);
		writew_relaxed(val, dws->regs + offset);
		break;
	case 4:
	default:
		dw_writel(dws, offset, val);
		writel_relaxed(val, dws->regs + offset);
		break;
	}
}
...@@ -230,14 +254,16 @@ static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
}

/*
 * This does disable the SPI controller, interrupts, and re-enable the
 * controller back. Transmit and receive FIFO buffers are cleared when the
 * device is disabled.
 * This disables the SPI controller, interrupts, clears the interrupts status
 * and CS, then re-enables the controller back. Transmit and receive FIFO
 * buffers are cleared when the device is disabled.
 */
static inline void spi_reset_chip(struct dw_spi *dws)
{
	spi_enable_chip(dws, 0);
	spi_mask_intr(dws, 0xff);
	dw_readl(dws, DW_SPI_ICR);
	dw_writel(dws, DW_SPI_SER, 0);
	spi_enable_chip(dws, 1);
}
...@@ -248,16 +274,13 @@ static inline void spi_shutdown_chip(struct dw_spi *dws)
}

extern void dw_spi_set_cs(struct spi_device *spi, bool enable);
extern void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
				 struct dw_spi_cfg *cfg);
extern int dw_spi_check_status(struct dw_spi *dws, bool raw);
extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws);
extern void dw_spi_remove_host(struct dw_spi *dws);
extern int dw_spi_suspend_host(struct dw_spi *dws);
extern int dw_spi_resume_host(struct dw_spi *dws);
extern u32 dw_spi_update_cr0(struct spi_controller *master,
struct spi_device *spi,
struct spi_transfer *transfer);
extern u32 dw_spi_update_cr0_v1_01a(struct spi_controller *master,
struct spi_device *spi,
struct spi_transfer *transfer);
#ifdef CONFIG_SPI_DW_DMA
......
...@@ -12,6 +12,7 @@

#define FSI_ENGID_SPI			0x23
#define FSI_MBOX_ROOT_CTRL_8		0x2860
#define FSI_MBOX_ROOT_CTRL_8_SPI_MUX	0xf0000000

#define FSI2SPI_DATA0			0x00
#define FSI2SPI_DATA1			0x04
...@@ -24,11 +25,16 @@

#define SPI_FSI_BASE			0x70000
#define SPI_FSI_INIT_TIMEOUT_MS		1000
#define SPI_FSI_MAX_TRANSFER_SIZE	2048
#define SPI_FSI_MAX_XFR_SIZE		2048
#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED	32

#define SPI_FSI_ERROR			0x0
#define SPI_FSI_COUNTER_CFG		0x1
#define SPI_FSI_COUNTER_CFG_LOOPS(x)	(((u64)(x) & 0xffULL) << 32)
#define SPI_FSI_COUNTER_CFG_N2_RX	BIT_ULL(8)
#define SPI_FSI_COUNTER_CFG_N2_TX	BIT_ULL(9)
#define SPI_FSI_COUNTER_CFG_N2_IMPLICIT	BIT_ULL(10)
#define SPI_FSI_COUNTER_CFG_N2_RELOAD	BIT_ULL(11)
#define SPI_FSI_CFG1			0x2
#define SPI_FSI_CLOCK_CFG		0x3
#define SPI_FSI_CLOCK_CFG_MM_ENABLE	BIT_ULL(32)
...@@ -61,7 +67,7 @@
#define SPI_FSI_STATUS_RDR_OVERRUN	BIT_ULL(62)
#define SPI_FSI_STATUS_RDR_FULL		BIT_ULL(63)
#define SPI_FSI_STATUS_ANY_ERROR	\
	(SPI_FSI_STATUS_ERROR | SPI_FSI_STATUS_TDR_UNDERRUN | \
	(SPI_FSI_STATUS_ERROR | \
	 SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \
	 SPI_FSI_STATUS_RDR_OVERRUN)
#define SPI_FSI_PORT_CTRL		0x9
...@@ -70,6 +76,8 @@ struct fsi_spi {
	struct device *dev;	/* SPI controller device */
	struct fsi_device *fsi;	/* FSI2SPI CFAM engine device */
	u32 base;
	size_t max_xfr_size;
	bool restricted;
};

struct fsi_spi_sequence {
...@@ -77,6 +85,26 @@ struct fsi_spi_sequence {
	u64 data;
};
static int fsi_spi_check_mux(struct fsi_device *fsi, struct device *dev)
{
int rc;
u32 root_ctrl_8;
__be32 root_ctrl_8_be;
rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8_be,
sizeof(root_ctrl_8_be));
if (rc)
return rc;
root_ctrl_8 = be32_to_cpu(root_ctrl_8_be);
dev_dbg(dev, "Root control register 8: %08x\n", root_ctrl_8);
if ((root_ctrl_8 & FSI_MBOX_ROOT_CTRL_8_SPI_MUX) ==
FSI_MBOX_ROOT_CTRL_8_SPI_MUX)
return 0;
return -ENOLINK;
}
static int fsi_spi_check_status(struct fsi_spi *ctx)
{
	int rc;
...@@ -205,8 +233,12 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
	if (rc)
		return rc;

	return fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
	rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
			       SPI_FSI_CLOCK_CFG_RESET2);
	if (rc)
		return rc;

	return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
}
static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
...@@ -214,8 +246,8 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
	/*
	 * Add the next byte of instruction to the 8-byte sequence register.
	 * Then decrement the counter so that the next instruction will go in
	 * the right place. Return the number of "slots" left in the sequence
	 * register.
	 * the right place. Return the index of the slot we just filled in the
	 * sequence register.
	 */
	seq->data |= (u64)val << seq->bit;
	seq->bit -= 8;

...@@ -233,40 +265,71 @@ static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
				     struct fsi_spi_sequence *seq,
				     struct spi_transfer *transfer)
{
	bool docfg = false;
	int loops;
	int idx;
	int rc;
	u8 val = 0;
	u8 len = min(transfer->len, 8U);
	u8 rem = transfer->len % len;
	u64 cfg = 0ULL;

	loops = transfer->len / len;

	if (transfer->tx_buf) {
		idx = fsi_spi_sequence_add(seq,
					   SPI_FSI_SEQUENCE_SHIFT_OUT(len));
		val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
		idx = fsi_spi_sequence_add(seq, val);

		if (rem)
			rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
	} else if (transfer->rx_buf) {
		idx = fsi_spi_sequence_add(seq,
					   SPI_FSI_SEQUENCE_SHIFT_IN(len));
		val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
		idx = fsi_spi_sequence_add(seq, val);

		if (rem)
			rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
	} else {
		return -EINVAL;
	}

	if (ctx->restricted) {
		const int eidx = rem ? 5 : 6;

		while (loops > 1 && idx <= eidx) {
			idx = fsi_spi_sequence_add(seq, val);
			loops--;
			docfg = true;
		}

		if (loops > 1) {
			dev_warn(ctx->dev, "No sequencer slots; aborting.\n");
			return -EINVAL;
		}
	}

	if (loops > 1) {
		fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
		docfg = true;
	}

	if (rem)
		fsi_spi_sequence_add(seq, rem);

	if (docfg) {
		cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
		if (transfer->rx_buf)
			cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
			       SPI_FSI_COUNTER_CFG_N2_TX |
			       SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
			       SPI_FSI_COUNTER_CFG_N2_RELOAD;

		rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG,
				       SPI_FSI_COUNTER_CFG_LOOPS(loops - 1));
		rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
		if (rc)
			return rc;
	} else {
		fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
	}

	if (rem)
		fsi_spi_sequence_add(seq, rem);

	return 0;
}
...@@ -275,6 +338,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
{
	int rc = 0;
	u64 status = 0ULL;
	u64 cfg = 0ULL;

	if (transfer->tx_buf) {
		int nb;
...@@ -312,6 +376,16 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
		u64 in = 0ULL;
		u8 *rx = transfer->rx_buf;

		rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
		if (rc)
			return rc;

		if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
			rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
			if (rc)
				return rc;
		}

		while (transfer->len > recv) {
			do {
				rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
...@@ -350,7 +424,7 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
	u64 status = 0ULL;
	u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
		SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
		FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 4);
		FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19);

	end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS);
	do {
...@@ -396,18 +470,22 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
					struct spi_message *mesg)
{
	int rc = 0;
	int rc;
	u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
	struct spi_transfer *transfer;
	struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);

	rc = fsi_spi_check_mux(ctx->fsi, ctx->dev);
	if (rc)
		return rc;

	list_for_each_entry(transfer, &mesg->transfers, transfer_list) {
		struct fsi_spi_sequence seq;
		struct spi_transfer *next = NULL;

		/* Sequencer must do shift out (tx) first. */
		if (!transfer->tx_buf ||
		    transfer->len > SPI_FSI_MAX_TRANSFER_SIZE) {
		    transfer->len > (ctx->max_xfr_size + 8)) {
			rc = -EINVAL;
			goto error;
		}
...@@ -431,7 +509,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,

			/* Sequencer can only do shift in (rx) after tx. */
			if (next->rx_buf) {
				if (next->len > SPI_FSI_MAX_TRANSFER_SIZE) {
				if (next->len > ctx->max_xfr_size) {
					rc = -EINVAL;
					goto error;
				}
...@@ -476,30 +554,21 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,

static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
{
	return SPI_FSI_MAX_TRANSFER_SIZE;
	struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);

	return ctx->max_xfr_size;
}

static int fsi_spi_probe(struct device *dev)
{
	int rc;
	u32 root_ctrl_8;
	struct device_node *np;
	int num_controllers_registered = 0;
	struct fsi_device *fsi = to_fsi_dev(dev);

	/*
	 * Check the SPI mux before attempting to probe. If the mux isn't set
	 * then the SPI controllers can't access their slave devices.
	 */
	rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8,
			    sizeof(root_ctrl_8));
	if (rc)
		return rc;

	if (!root_ctrl_8) {
		dev_dbg(dev, "SPI mux not set, aborting probe.\n");
		return -ENODEV;
	}

	rc = fsi_spi_check_mux(fsi, dev);
	if (rc)
		return -ENODEV;

	for_each_available_child_of_node(dev->of_node, np) {
		u32 base;
...@@ -524,6 +593,14 @@ static int fsi_spi_probe(struct device *dev)
		ctx->fsi = fsi;
		ctx->base = base + SPI_FSI_BASE;

		if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
			ctx->restricted = true;
			ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
		} else {
			ctx->restricted = false;
			ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
		}

		rc = devm_spi_register_controller(dev, ctlr);
		if (rc)
			spi_controller_put(ctlr);
......
...@@ -53,7 +53,6 @@

#define SPI_SR				0x2c
#define SPI_SR_TCFQF			BIT(31)
#define SPI_SR_EOQF			BIT(28)
#define SPI_SR_TFUF			BIT(27)
#define SPI_SR_TFFF			BIT(25)
#define SPI_SR_CMDTCF			BIT(23)
...@@ -62,7 +61,7 @@
#define SPI_SR_TFIWF			BIT(18)
#define SPI_SR_RFDF			BIT(17)
#define SPI_SR_CMDFFF			BIT(16)
#define SPI_SR_CLEAR			(SPI_SR_TCFQF | SPI_SR_EOQF | \
#define SPI_SR_CLEAR			(SPI_SR_TCFQF | \
					SPI_SR_TFUF | SPI_SR_TFFF | \
					SPI_SR_CMDTCF | SPI_SR_SPEF | \
					SPI_SR_RFOF | SPI_SR_TFIWF | \
...@@ -75,7 +74,6 @@

#define SPI_RSER			0x30
#define SPI_RSER_TCFQE			BIT(31)
#define SPI_RSER_EOQFE			BIT(28)
#define SPI_RSER_CMDTCFE		BIT(23)

#define SPI_PUSHR			0x34
...@@ -114,7 +112,6 @@ struct chip_data {
};

enum dspi_trans_mode {
	DSPI_EOQ_MODE = 0,
	DSPI_XSPI_MODE,
	DSPI_DMA_MODE,
};
...@@ -189,7 +186,7 @@ static const struct fsl_dspi_devtype_data devtype_data[] = {
		.fifo_size		= 4,
	},
	[MCF5441X] = {
		.trans_mode		= DSPI_EOQ_MODE,
		.trans_mode		= DSPI_DMA_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
	},
...@@ -671,11 +668,6 @@ static void ns_delay_scale(char *psc, char *sc, int delay_ns,
	}
}

static void dspi_pushr_write(struct fsl_dspi *dspi)
{
	regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
}

static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
{
	/*
...@@ -735,21 +727,6 @@ static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
	}
}
static void dspi_eoq_fifo_write(struct fsl_dspi *dspi, int num_words)
{
u16 xfer_cmd = dspi->tx_cmd;
/* Fill TX FIFO with as many transfers as possible */
while (num_words--) {
dspi->tx_cmd = xfer_cmd;
/* Request EOQF for last transfer in FIFO */
if (num_words == 0)
dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
/* Write combined TX FIFO and CMD FIFO entry */
dspi_pushr_write(dspi);
}
}
static u32 dspi_popr_read(struct fsl_dspi *dspi)
{
	u32 rxdata = 0;
...@@ -818,7 +795,7 @@ static void dspi_setup_accel(struct fsl_dspi *dspi)
	dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8);

	/*
	 * Update CTAR here (code is common for EOQ, XSPI and DMA modes).
	 * Update CTAR here (code is common for XSPI and DMA modes).
	 * We will update CTARE in the portion specific to XSPI, when we
	 * also know the preload value (DTCP).
	 */
...@@ -862,9 +839,6 @@ static void dspi_fifo_write(struct fsl_dspi *dspi)

	spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq);

	if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE)
		dspi_eoq_fifo_write(dspi, num_words);
	else
		dspi_xspi_fifo_write(dspi, num_words);

	/*
	 * Everything after this point is in a potential race with the next
...@@ -898,7 +872,7 @@ static int dspi_poll(struct fsl_dspi *dspi) ...@@ -898,7 +872,7 @@ static int dspi_poll(struct fsl_dspi *dspi)
regmap_read(dspi->regmap, SPI_SR, &spi_sr); regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr); regmap_write(dspi->regmap, SPI_SR, spi_sr);
if (spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF)) if (spi_sr & SPI_SR_CMDTCF)
break; break;
} while (--tries); } while (--tries);
...@@ -916,7 +890,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id) ...@@ -916,7 +890,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
regmap_read(dspi->regmap, SPI_SR, &spi_sr); regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr); regmap_write(dspi->regmap, SPI_SR, spi_sr);
if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF))) if (!(spi_sr & SPI_SR_CMDTCF))
return IRQ_NONE; return IRQ_NONE;
if (dspi_rxtx(dspi) == 0) if (dspi_rxtx(dspi) == 0)
...@@ -1204,9 +1178,6 @@ static int dspi_init(struct fsl_dspi *dspi) ...@@ -1204,9 +1178,6 @@ static int dspi_init(struct fsl_dspi *dspi)
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
switch (dspi->devtype_data->trans_mode) { switch (dspi->devtype_data->trans_mode) {
case DSPI_EOQ_MODE:
regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
break;
case DSPI_XSPI_MODE: case DSPI_XSPI_MODE:
regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE); regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
break; break;
...@@ -1245,22 +1216,6 @@ static int dspi_slave_abort(struct spi_master *master) ...@@ -1245,22 +1216,6 @@ static int dspi_slave_abort(struct spi_master *master)
return 0; return 0;
} }
/*
* EOQ mode will inevitably deassert its PCS signal on last word in a queue
* (hardware limitation), so we need to inform the spi_device that larger
* buffers than the FIFO size are going to have the chip select randomly
* toggling, so it has a chance to adapt its message sizes.
*/
static size_t dspi_max_message_size(struct spi_device *spi)
{
struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE)
return dspi->devtype_data->fifo_size;
return SIZE_MAX;
}
static int dspi_probe(struct platform_device *pdev) static int dspi_probe(struct platform_device *pdev)
{ {
struct device_node *np = pdev->dev.of_node; struct device_node *np = pdev->dev.of_node;
...@@ -1289,7 +1244,6 @@ static int dspi_probe(struct platform_device *pdev) ...@@ -1289,7 +1244,6 @@ static int dspi_probe(struct platform_device *pdev)
ctlr->setup = dspi_setup; ctlr->setup = dspi_setup;
ctlr->transfer_one_message = dspi_transfer_one_message; ctlr->transfer_one_message = dspi_transfer_one_message;
ctlr->max_message_size = dspi_max_message_size;
ctlr->dev.of_node = pdev->dev.of_node; ctlr->dev.of_node = pdev->dev.of_node;
ctlr->cleanup = dspi_cleanup; ctlr->cleanup = dspi_cleanup;
......
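With EOQ mode removed, the remaining fsl-dspi paths (XSPI and DMA) signal completion through the command-transfer-complete flag alone, which is why both the poll loop and the interrupt handler above now test only SPI_SR_CMDTCF. A minimal sketch of the resulting polling pattern, reusing the register names from this hunk (the retry bound is illustrative, not the driver's value):

	static int dspi_poll_cmdtcf(struct fsl_dspi *dspi)
	{
		int tries = 1000;	/* illustrative bound */
		u32 spi_sr;

		do {
			regmap_read(dspi->regmap, SPI_SR, &spi_sr);
			/* SPI_SR bits are write-1-to-clear: ack whatever we saw */
			regmap_write(dspi->regmap, SPI_SR, spi_sr);

			if (spi_sr & SPI_SR_CMDTCF)
				return 0;
		} while (--tries);

		return -ETIMEDOUT;
	}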
...@@ -731,7 +731,7 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem, ...@@ -731,7 +731,7 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem,
if (ret < 0) if (ret < 0)
goto err_pm; goto err_pm;
dev_info(dev, "at 0x%p (irq = %u)\n", espi->reg_base, irq); dev_info(dev, "irq = %u\n", irq);
pm_runtime_mark_last_busy(dev); pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev); pm_runtime_put_autosuspend(dev);
......
...@@ -944,8 +944,7 @@ static int fsl_lpspi_remove(struct platform_device *pdev) ...@@ -944,8 +944,7 @@ static int fsl_lpspi_remove(struct platform_device *pdev)
return 0; return 0;
} }
#ifdef CONFIG_PM_SLEEP static int __maybe_unused fsl_lpspi_suspend(struct device *dev)
static int fsl_lpspi_suspend(struct device *dev)
{ {
int ret; int ret;
...@@ -954,7 +953,7 @@ static int fsl_lpspi_suspend(struct device *dev) ...@@ -954,7 +953,7 @@ static int fsl_lpspi_suspend(struct device *dev)
return ret; return ret;
} }
static int fsl_lpspi_resume(struct device *dev) static int __maybe_unused fsl_lpspi_resume(struct device *dev)
{ {
int ret; int ret;
...@@ -968,7 +967,6 @@ static int fsl_lpspi_resume(struct device *dev) ...@@ -968,7 +967,6 @@ static int fsl_lpspi_resume(struct device *dev)
return 0; return 0;
} }
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops fsl_lpspi_pm_ops = { static const struct dev_pm_ops fsl_lpspi_pm_ops = {
SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend, SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
......
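Replacing the #ifdef CONFIG_PM_SLEEP guard with __maybe_unused is the usual cleanup: the suspend/resume callbacks are always compiled (so they cannot silently bit-rot), and SET_SYSTEM_SLEEP_PM_OPS() simply omits them when sleep support is disabled. A minimal sketch of the idiom with placeholder names; the real lpspi callbacks also handle pinctrl state:

	static int __maybe_unused demo_suspend(struct device *dev)
	{
		return pm_runtime_force_suspend(dev);
	}

	static int __maybe_unused demo_resume(struct device *dev)
	{
		return pm_runtime_force_resume(dev);
	}

	static const struct dev_pm_ops demo_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
	};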
...@@ -290,6 +290,7 @@ static int spi_geni_init(struct spi_geni_master *mas) ...@@ -290,6 +290,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
{ {
struct geni_se *se = &mas->se; struct geni_se *se = &mas->se;
unsigned int proto, major, minor, ver; unsigned int proto, major, minor, ver;
u32 spi_tx_cfg;
pm_runtime_get_sync(mas->dev); pm_runtime_get_sync(mas->dev);
...@@ -308,7 +309,7 @@ static int spi_geni_init(struct spi_geni_master *mas) ...@@ -308,7 +309,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
* Hardware programming guide suggests to configure * Hardware programming guide suggests to configure
* RX FIFO RFR level to fifo_depth-2. * RX FIFO RFR level to fifo_depth-2.
*/ */
geni_se_init(se, mas->tx_fifo_depth / 2, mas->tx_fifo_depth - 2); geni_se_init(se, mas->tx_fifo_depth - 3, mas->tx_fifo_depth - 2);
/* Transmit an entire FIFO worth of data per IRQ */ /* Transmit an entire FIFO worth of data per IRQ */
mas->tx_wm = 1; mas->tx_wm = 1;
ver = geni_se_get_qup_hw_version(se); ver = geni_se_get_qup_hw_version(se);
...@@ -322,16 +323,103 @@ static int spi_geni_init(struct spi_geni_master *mas) ...@@ -322,16 +323,103 @@ static int spi_geni_init(struct spi_geni_master *mas)
geni_se_select_mode(se, GENI_SE_FIFO); geni_se_select_mode(se, GENI_SE_FIFO);
/* We always control CS manually */
spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
spi_tx_cfg &= ~CS_TOGGLE;
writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
pm_runtime_put(mas->dev); pm_runtime_put(mas->dev);
return 0; return 0;
} }
static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
{
/*
* Calculate how many bytes we'll put in each FIFO word. If the
* transfer words don't pack cleanly into a FIFO word we'll just put
* one transfer word in each FIFO word. If they do pack we'll pack 'em.
*/
if (mas->fifo_width_bits % mas->cur_bits_per_word)
return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
BITS_PER_BYTE));
return mas->fifo_width_bits / BITS_PER_BYTE;
}
static bool geni_spi_handle_tx(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
unsigned int max_bytes;
const u8 *tx_buf;
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
if (mas->tx_rem_bytes < max_bytes)
max_bytes = mas->tx_rem_bytes;
tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
while (i < max_bytes) {
unsigned int j;
unsigned int bytes_to_write;
u32 fifo_word = 0;
u8 *fifo_byte = (u8 *)&fifo_word;
bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
for (j = 0; j < bytes_to_write; j++)
fifo_byte[j] = tx_buf[i++];
iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
}
mas->tx_rem_bytes -= max_bytes;
if (!mas->tx_rem_bytes) {
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
return false;
}
return true;
}
static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
u32 rx_fifo_status;
unsigned int rx_bytes;
unsigned int rx_last_byte_valid;
u8 *rx_buf;
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
if (rx_fifo_status & RX_LAST) {
rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
if (rx_last_byte_valid && rx_last_byte_valid < 4)
rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
}
if (mas->rx_rem_bytes < rx_bytes)
rx_bytes = mas->rx_rem_bytes;
rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
while (i < rx_bytes) {
u32 fifo_word = 0;
u8 *fifo_byte = (u8 *)&fifo_word;
unsigned int bytes_to_read;
unsigned int j;
bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
for (j = 0; j < bytes_to_read; j++)
rx_buf[i++] = fifo_byte[j];
}
mas->rx_rem_bytes -= rx_bytes;
}
static void setup_fifo_xfer(struct spi_transfer *xfer, static void setup_fifo_xfer(struct spi_transfer *xfer,
struct spi_geni_master *mas, struct spi_geni_master *mas,
u16 mode, struct spi_master *spi) u16 mode, struct spi_master *spi)
{ {
u32 m_cmd = 0; u32 m_cmd = 0;
u32 spi_tx_cfg, len; u32 len;
struct geni_se *se = &mas->se; struct geni_se *se = &mas->se;
int ret; int ret;
...@@ -350,7 +438,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, ...@@ -350,7 +438,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
spin_lock_irq(&mas->lock); spin_lock_irq(&mas->lock);
spin_unlock_irq(&mas->lock); spin_unlock_irq(&mas->lock);
spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
if (xfer->bits_per_word != mas->cur_bits_per_word) { if (xfer->bits_per_word != mas->cur_bits_per_word) {
spi_setup_word_len(mas, mode, xfer->bits_per_word); spi_setup_word_len(mas, mode, xfer->bits_per_word);
mas->cur_bits_per_word = xfer->bits_per_word; mas->cur_bits_per_word = xfer->bits_per_word;
...@@ -364,8 +451,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, ...@@ -364,8 +451,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
mas->tx_rem_bytes = 0; mas->tx_rem_bytes = 0;
mas->rx_rem_bytes = 0; mas->rx_rem_bytes = 0;
spi_tx_cfg &= ~CS_TOGGLE;
if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word; len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
else else
...@@ -384,7 +469,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, ...@@ -384,7 +469,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
writel(len, se->base + SE_SPI_RX_TRANS_LEN); writel(len, se->base + SE_SPI_RX_TRANS_LEN);
mas->rx_rem_bytes = xfer->len; mas->rx_rem_bytes = xfer->len;
} }
writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
/* /*
* Lock around right before we start the transfer since our * Lock around right before we start the transfer since our
...@@ -398,8 +482,10 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, ...@@ -398,8 +482,10 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
* setting up GENI SE engine, as driver starts data transfer * setting up GENI SE engine, as driver starts data transfer
* for the watermark interrupt. * for the watermark interrupt.
*/ */
if (m_cmd & SPI_TX_ONLY) if (m_cmd & SPI_TX_ONLY) {
if (geni_spi_handle_tx(mas))
writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG); writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
}
spin_unlock_irq(&mas->lock); spin_unlock_irq(&mas->lock);
} }
...@@ -417,85 +503,6 @@ static int spi_geni_transfer_one(struct spi_master *spi, ...@@ -417,85 +503,6 @@ static int spi_geni_transfer_one(struct spi_master *spi,
return 1; return 1;
} }
static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
{
/*
* Calculate how many bytes we'll put in each FIFO word. If the
* transfer words don't pack cleanly into a FIFO word we'll just put
* one transfer word in each FIFO word. If they do pack we'll pack 'em.
*/
if (mas->fifo_width_bits % mas->cur_bits_per_word)
return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
BITS_PER_BYTE));
return mas->fifo_width_bits / BITS_PER_BYTE;
}
static void geni_spi_handle_tx(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
unsigned int max_bytes;
const u8 *tx_buf;
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
if (mas->tx_rem_bytes < max_bytes)
max_bytes = mas->tx_rem_bytes;
tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
while (i < max_bytes) {
unsigned int j;
unsigned int bytes_to_write;
u32 fifo_word = 0;
u8 *fifo_byte = (u8 *)&fifo_word;
bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
for (j = 0; j < bytes_to_write; j++)
fifo_byte[j] = tx_buf[i++];
iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
}
mas->tx_rem_bytes -= max_bytes;
if (!mas->tx_rem_bytes)
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
}
static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
u32 rx_fifo_status;
unsigned int rx_bytes;
unsigned int rx_last_byte_valid;
u8 *rx_buf;
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
if (rx_fifo_status & RX_LAST) {
rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
if (rx_last_byte_valid && rx_last_byte_valid < 4)
rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
}
if (mas->rx_rem_bytes < rx_bytes)
rx_bytes = mas->rx_rem_bytes;
rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
while (i < rx_bytes) {
u32 fifo_word = 0;
u8 *fifo_byte = (u8 *)&fifo_word;
unsigned int bytes_to_read;
unsigned int j;
bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
for (j = 0; j < bytes_to_read; j++)
rx_buf[i++] = fifo_byte[j];
}
mas->rx_rem_bytes -= rx_bytes;
}
static irqreturn_t geni_spi_isr(int irq, void *data) static irqreturn_t geni_spi_isr(int irq, void *data)
{ {
struct spi_master *spi = data; struct spi_master *spi = data;
...@@ -613,11 +620,9 @@ static int spi_geni_probe(struct platform_device *pdev) ...@@ -613,11 +620,9 @@ static int spi_geni_probe(struct platform_device *pdev)
return PTR_ERR(mas->se.opp_table); return PTR_ERR(mas->se.opp_table);
/* OPP table is optional */ /* OPP table is optional */
ret = dev_pm_opp_of_add_table(&pdev->dev); ret = dev_pm_opp_of_add_table(&pdev->dev);
if (!ret) { if (ret && ret != -ENODEV) {
mas->se.has_opp_table = true;
} else if (ret != -ENODEV) {
dev_err(&pdev->dev, "invalid OPP table in device tree\n"); dev_err(&pdev->dev, "invalid OPP table in device tree\n");
return ret; goto put_clkname;
} }
spi->bus_num = -1; spi->bus_num = -1;
...@@ -669,8 +674,8 @@ static int spi_geni_probe(struct platform_device *pdev) ...@@ -669,8 +674,8 @@ static int spi_geni_probe(struct platform_device *pdev)
spi_geni_probe_runtime_disable: spi_geni_probe_runtime_disable:
pm_runtime_disable(dev); pm_runtime_disable(dev);
spi_master_put(spi); spi_master_put(spi);
if (mas->se.has_opp_table)
dev_pm_opp_of_remove_table(&pdev->dev); dev_pm_opp_of_remove_table(&pdev->dev);
put_clkname:
dev_pm_opp_put_clkname(mas->se.opp_table); dev_pm_opp_put_clkname(mas->se.opp_table);
return ret; return ret;
} }
...@@ -685,7 +690,6 @@ static int spi_geni_remove(struct platform_device *pdev) ...@@ -685,7 +690,6 @@ static int spi_geni_remove(struct platform_device *pdev)
free_irq(mas->irq, spi); free_irq(mas->irq, spi);
pm_runtime_disable(&pdev->dev); pm_runtime_disable(&pdev->dev);
if (mas->se.has_opp_table)
dev_pm_opp_of_remove_table(&pdev->dev); dev_pm_opp_of_remove_table(&pdev->dev);
dev_pm_opp_put_clkname(mas->se.opp_table); dev_pm_opp_put_clkname(mas->se.opp_table);
return 0; return 0;
......
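The geni_byte_per_fifo_word() helper, now defined before its first user instead of after the ISR, decides how transfer words are packed into 32-bit FIFO words: word sizes that divide the FIFO width evenly are packed back to back, anything else gets one transfer word per FIFO word, padded up to a power-of-two number of bytes. A worked example of the rule, written as a standalone sketch rather than driver code:

	static unsigned int bytes_per_fifo_word(unsigned int fifo_width_bits,
						unsigned int bits_per_word)
	{
		if (fifo_width_bits % bits_per_word)
			return roundup_pow_of_two(DIV_ROUND_UP(bits_per_word,
							       BITS_PER_BYTE));
		return fifo_width_bits / BITS_PER_BYTE;
	}

	/* bytes_per_fifo_word(32, 8)  == 4: four 8-bit words per FIFO word      */
	/* bytes_per_fifo_word(32, 16) == 4: two 16-bit words per FIFO word      */
	/* bytes_per_fifo_word(32, 12) == 2: 12 bits don't pack, so each word    */
	/* rides in roundup_pow_of_two(DIV_ROUND_UP(12, 8)) = 2 bytes            */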
...@@ -1503,6 +1503,8 @@ static int spi_imx_transfer(struct spi_device *spi, ...@@ -1503,6 +1503,8 @@ static int spi_imx_transfer(struct spi_device *spi,
{ {
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
transfer->effective_speed_hz = spi_imx->spi_bus_clk;
/* flush rxfifo before transfer */ /* flush rxfifo before transfer */
while (spi_imx->devtype_data->rx_available(spi_imx)) while (spi_imx->devtype_data->rx_available(spi_imx))
readl(spi_imx->base + MXC_CSPIRXDATA); readl(spi_imx->base + MXC_CSPIRXDATA);
...@@ -1695,7 +1697,7 @@ static int spi_imx_probe(struct platform_device *pdev) ...@@ -1695,7 +1697,7 @@ static int spi_imx_probe(struct platform_device *pdev)
goto out_runtime_pm_put; goto out_runtime_pm_put;
if (ret < 0) if (ret < 0)
dev_err(&pdev->dev, "dma setup error %d, use pio\n", dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
ret); ret);
} }
...@@ -1707,16 +1709,17 @@ static int spi_imx_probe(struct platform_device *pdev) ...@@ -1707,16 +1709,17 @@ static int spi_imx_probe(struct platform_device *pdev)
ret = spi_bitbang_start(&spi_imx->bitbang); ret = spi_bitbang_start(&spi_imx->bitbang);
if (ret) { if (ret) {
dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
goto out_runtime_pm_put; goto out_bitbang_start;
} }
dev_info(&pdev->dev, "probed\n");
pm_runtime_mark_last_busy(spi_imx->dev); pm_runtime_mark_last_busy(spi_imx->dev);
pm_runtime_put_autosuspend(spi_imx->dev); pm_runtime_put_autosuspend(spi_imx->dev);
return ret; return ret;
out_bitbang_start:
if (spi_imx->devtype_data->has_dmamode)
spi_imx_sdma_exit(spi_imx);
out_runtime_pm_put: out_runtime_pm_put:
pm_runtime_dont_use_autosuspend(spi_imx->dev); pm_runtime_dont_use_autosuspend(spi_imx->dev);
pm_runtime_put_sync(spi_imx->dev); pm_runtime_put_sync(spi_imx->dev);
......
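Reporting transfer->effective_speed_hz (here simply the cached spi_bus_clk) lets the SPI core compute realistic transfer timeouts and lets clients see the rate the controller actually used, which may differ from what they requested. A minimal client-side sketch using only the generic spi.h API (names hypothetical):

	#include <linux/spi/spi.h>

	static int demo_xfer(struct spi_device *spi, const void *buf, size_t len)
	{
		struct spi_transfer t = {
			.tx_buf   = buf,
			.len      = len,
			.speed_hz = 10000000,	/* requested rate */
		};
		struct spi_message m;
		int ret;

		spi_message_init_with_transfers(&m, &t, 1);
		ret = spi_sync(spi, &m);
		if (!ret)
			dev_dbg(&spi->dev, "transfer ran at %u Hz\n",
				t.effective_speed_hz);
		return ret;
	}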
...@@ -625,9 +625,8 @@ static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data) ...@@ -625,9 +625,8 @@ static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
struct lantiq_ssc_spi *spi = data; struct lantiq_ssc_spi *spi = data;
const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg; const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
u32 val = lantiq_ssc_readl(spi, hwcfg->irncr); u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
unsigned long flags;
spin_lock_irqsave(&spi->lock, flags); spin_lock(&spi->lock);
if (hwcfg->irq_ack) if (hwcfg->irq_ack)
lantiq_ssc_writel(spi, val, hwcfg->irncr); lantiq_ssc_writel(spi, val, hwcfg->irncr);
...@@ -652,12 +651,12 @@ static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data) ...@@ -652,12 +651,12 @@ static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
} }
} }
spin_unlock_irqrestore(&spi->lock, flags); spin_unlock(&spi->lock);
return IRQ_HANDLED; return IRQ_HANDLED;
completed: completed:
queue_work(spi->wq, &spi->work); queue_work(spi->wq, &spi->work);
spin_unlock_irqrestore(&spi->lock, flags); spin_unlock(&spi->lock);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -668,12 +667,11 @@ static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data) ...@@ -668,12 +667,11 @@ static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg; const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT); u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
u32 val = lantiq_ssc_readl(spi, hwcfg->irncr); u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
unsigned long flags;
if (!(stat & LTQ_SPI_STAT_ERRORS)) if (!(stat & LTQ_SPI_STAT_ERRORS))
return IRQ_NONE; return IRQ_NONE;
spin_lock_irqsave(&spi->lock, flags); spin_lock(&spi->lock);
if (hwcfg->irq_ack) if (hwcfg->irq_ack)
lantiq_ssc_writel(spi, val, hwcfg->irncr); lantiq_ssc_writel(spi, val, hwcfg->irncr);
...@@ -697,7 +695,7 @@ static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data) ...@@ -697,7 +695,7 @@ static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
if (spi->master->cur_msg) if (spi->master->cur_msg)
spi->master->cur_msg->status = -EIO; spi->master->cur_msg->status = -EIO;
queue_work(spi->wq, &spi->work); queue_work(spi->wq, &spi->work);
spin_unlock_irqrestore(&spi->lock, flags); spin_unlock(&spi->lock);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
......
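Both lantiq-ssc handlers (and the qcom-qspi one further down) run in hard-IRQ context, where local interrupts are already disabled, so saving and restoring the flags is pure overhead; plain spin_lock()/spin_unlock() is sufficient there, while any process-context path sharing the lock keeps the _irqsave variants. A minimal sketch of that split, with hypothetical names:

	struct demo_priv {
		spinlock_t lock;
		struct work_struct work;
	};

	static irqreturn_t demo_isr(int irq, void *data)
	{
		struct demo_priv *priv = data;

		spin_lock(&priv->lock);		/* hardirqs are already off here */
		/* acknowledge the hardware and update shared state */
		spin_unlock(&priv->lock);

		return IRQ_HANDLED;
	}

	static void demo_work(struct work_struct *work)
	{
		struct demo_priv *priv = container_of(work, struct demo_priv, work);
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);	/* the ISR may interrupt us */
		/* push the next chunk of the message */
		spin_unlock_irqrestore(&priv->lock, flags);
	}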
...@@ -139,9 +139,8 @@ static int spi_mux_probe(struct spi_device *spi) ...@@ -139,9 +139,8 @@ static int spi_mux_probe(struct spi_device *spi)
priv->mux = devm_mux_control_get(&spi->dev, NULL); priv->mux = devm_mux_control_get(&spi->dev, NULL);
if (IS_ERR(priv->mux)) { if (IS_ERR(priv->mux)) {
ret = PTR_ERR(priv->mux); ret = dev_err_probe(&spi->dev, PTR_ERR(priv->mux),
if (ret != -EPROBE_DEFER) "failed to get control-mux\n");
dev_err(&spi->dev, "failed to get control-mux\n");
goto err_put_ctlr; goto err_put_ctlr;
} }
......
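dev_err_probe() collapses the open-coded -EPROBE_DEFER handling: it returns the error it is given, logs at error level only when the error is not -EPROBE_DEFER, and records the deferral reason for later inspection. A before/after sketch of the pattern used in this hunk:

	/* before: extra boilerplate for every resource that may defer */
	mux = devm_mux_control_get(dev, NULL);
	if (IS_ERR(mux)) {
		ret = PTR_ERR(mux);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get control-mux\n");
		return ret;
	}

	/* after: one call, same return value, deferral reason recorded */
	mux = devm_mux_control_get(dev, NULL);
	if (IS_ERR(mux))
		return dev_err_probe(dev, PTR_ERR(mux),
				     "failed to get control-mux\n");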
...@@ -677,7 +677,6 @@ static int npcm_fiu_probe(struct platform_device *pdev) ...@@ -677,7 +677,6 @@ static int npcm_fiu_probe(struct platform_device *pdev)
struct npcm_fiu_spi *fiu; struct npcm_fiu_spi *fiu;
void __iomem *regbase; void __iomem *regbase;
struct resource *res; struct resource *res;
int ret;
int id; int id;
ctrl = spi_alloc_master(dev, sizeof(*fiu)); ctrl = spi_alloc_master(dev, sizeof(*fiu));
...@@ -736,11 +735,7 @@ static int npcm_fiu_probe(struct platform_device *pdev) ...@@ -736,11 +735,7 @@ static int npcm_fiu_probe(struct platform_device *pdev)
ctrl->num_chipselect = fiu->info->max_cs; ctrl->num_chipselect = fiu->info->max_cs;
ctrl->dev.of_node = dev->of_node; ctrl->dev.of_node = dev->of_node;
ret = devm_spi_register_master(dev, ctrl); return devm_spi_register_master(dev, ctrl);
if (ret)
return ret;
return 0;
} }
static int npcm_fiu_remove(struct platform_device *pdev) static int npcm_fiu_remove(struct platform_device *pdev)
......
...@@ -3,7 +3,8 @@ ...@@ -3,7 +3,8 @@
/* /*
* NXP FlexSPI(FSPI) controller driver. * NXP FlexSPI(FSPI) controller driver.
* *
* Copyright 2019 NXP. * Copyright 2019-2020 NXP
* Copyright 2020 Puresoftware Ltd.
* *
* FlexSPI is a flexsible SPI host controller which supports two SPI * FlexSPI is a flexsible SPI host controller which supports two SPI
* channels and up to 4 external devices. Each channel supports * channels and up to 4 external devices. Each channel supports
...@@ -30,6 +31,7 @@ ...@@ -30,6 +31,7 @@
* Frieder Schrempf <frieder.schrempf@kontron.de> * Frieder Schrempf <frieder.schrempf@kontron.de>
*/ */
#include <linux/acpi.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/completion.h> #include <linux/completion.h>
...@@ -563,6 +565,9 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f) ...@@ -563,6 +565,9 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
{ {
int ret; int ret;
if (is_acpi_node(f->dev->fwnode))
return 0;
ret = clk_prepare_enable(f->clk_en); ret = clk_prepare_enable(f->clk_en);
if (ret) if (ret)
return ret; return ret;
...@@ -576,10 +581,15 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f) ...@@ -576,10 +581,15 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
return 0; return 0;
} }
static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f) static int nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
{ {
if (is_acpi_node(f->dev->fwnode))
return 0;
clk_disable_unprepare(f->clk); clk_disable_unprepare(f->clk);
clk_disable_unprepare(f->clk_en); clk_disable_unprepare(f->clk_en);
return 0;
} }
/* /*
...@@ -1001,7 +1011,7 @@ static int nxp_fspi_probe(struct platform_device *pdev) ...@@ -1001,7 +1011,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
f = spi_controller_get_devdata(ctlr); f = spi_controller_get_devdata(ctlr);
f->dev = dev; f->dev = dev;
f->devtype_data = of_device_get_match_data(dev); f->devtype_data = device_get_match_data(dev);
if (!f->devtype_data) { if (!f->devtype_data) {
ret = -ENODEV; ret = -ENODEV;
goto err_put_ctrl; goto err_put_ctrl;
...@@ -1010,7 +1020,12 @@ static int nxp_fspi_probe(struct platform_device *pdev) ...@@ -1010,7 +1020,12 @@ static int nxp_fspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, f); platform_set_drvdata(pdev, f);
/* find the resources - configuration register address space */ /* find the resources - configuration register address space */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_base"); if (is_acpi_node(f->dev->fwnode))
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
else
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "fspi_base");
f->iobase = devm_ioremap_resource(dev, res); f->iobase = devm_ioremap_resource(dev, res);
if (IS_ERR(f->iobase)) { if (IS_ERR(f->iobase)) {
ret = PTR_ERR(f->iobase); ret = PTR_ERR(f->iobase);
...@@ -1018,7 +1033,12 @@ static int nxp_fspi_probe(struct platform_device *pdev) ...@@ -1018,7 +1033,12 @@ static int nxp_fspi_probe(struct platform_device *pdev)
} }
/* find the resources - controller memory mapped space */ /* find the resources - controller memory mapped space */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap"); if (is_acpi_node(f->dev->fwnode))
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
else
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "fspi_mmap");
if (!res) { if (!res) {
ret = -ENODEV; ret = -ENODEV;
goto err_put_ctrl; goto err_put_ctrl;
...@@ -1029,6 +1049,7 @@ static int nxp_fspi_probe(struct platform_device *pdev) ...@@ -1029,6 +1049,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
f->memmap_phy_size = resource_size(res); f->memmap_phy_size = resource_size(res);
/* find the clocks */ /* find the clocks */
if (dev_of_node(&pdev->dev)) {
f->clk_en = devm_clk_get(dev, "fspi_en"); f->clk_en = devm_clk_get(dev, "fspi_en");
if (IS_ERR(f->clk_en)) { if (IS_ERR(f->clk_en)) {
ret = PTR_ERR(f->clk_en); ret = PTR_ERR(f->clk_en);
...@@ -1046,6 +1067,7 @@ static int nxp_fspi_probe(struct platform_device *pdev) ...@@ -1046,6 +1067,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
dev_err(dev, "can not enable the clock\n"); dev_err(dev, "can not enable the clock\n");
goto err_put_ctrl; goto err_put_ctrl;
} }
}
/* find the irq */ /* find the irq */
ret = platform_get_irq(pdev, 0); ret = platform_get_irq(pdev, 0);
...@@ -1127,6 +1149,14 @@ static const struct of_device_id nxp_fspi_dt_ids[] = { ...@@ -1127,6 +1149,14 @@ static const struct of_device_id nxp_fspi_dt_ids[] = {
}; };
MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids); MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
#ifdef CONFIG_ACPI
static const struct acpi_device_id nxp_fspi_acpi_ids[] = {
{ "NXP0009", .driver_data = (kernel_ulong_t)&lx2160a_data, },
{}
};
MODULE_DEVICE_TABLE(acpi, nxp_fspi_acpi_ids);
#endif
static const struct dev_pm_ops nxp_fspi_pm_ops = { static const struct dev_pm_ops nxp_fspi_pm_ops = {
.suspend = nxp_fspi_suspend, .suspend = nxp_fspi_suspend,
.resume = nxp_fspi_resume, .resume = nxp_fspi_resume,
...@@ -1136,6 +1166,7 @@ static struct platform_driver nxp_fspi_driver = { ...@@ -1136,6 +1166,7 @@ static struct platform_driver nxp_fspi_driver = {
.driver = { .driver = {
.name = "nxp-fspi", .name = "nxp-fspi",
.of_match_table = nxp_fspi_dt_ids, .of_match_table = nxp_fspi_dt_ids,
.acpi_match_table = ACPI_PTR(nxp_fspi_acpi_ids),
.pm = &nxp_fspi_pm_ops, .pm = &nxp_fspi_pm_ops,
}, },
.probe = nxp_fspi_probe, .probe = nxp_fspi_probe,
......
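Switching to device_get_match_data() and guarding the clock and resource lookups with is_acpi_node() is what lets a single probe path serve both DT and ACPI enumeration: the match data comes from whichever table matched, and DT-only details such as named clocks are simply skipped on ACPI systems. A reduced sketch of that shape (identifiers hypothetical):

	#include <linux/acpi.h>
	#include <linux/platform_device.h>
	#include <linux/property.h>

	static int demo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		const struct demo_devtype_data *data;

		data = device_get_match_data(dev);	/* OF and ACPI alike */
		if (!data)
			return -ENODEV;

		if (!is_acpi_node(dev->fwnode)) {
			/* clocks and named resources exist only in the DT case */
		}

		return 0;
	}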
...@@ -24,7 +24,6 @@ ...@@ -24,7 +24,6 @@
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_device.h> #include <linux/of_device.h>
#include <linux/gcd.h> #include <linux/gcd.h>
#include <linux/iopoll.h>
#include <linux/spi/spi.h> #include <linux/spi/spi.h>
...@@ -348,9 +347,19 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, ...@@ -348,9 +347,19 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{ {
u32 val; unsigned long timeout;
return readl_poll_timeout(reg, val, val & bit, 1, MSEC_PER_SEC); timeout = jiffies + msecs_to_jiffies(1000);
while (!(readl_relaxed(reg) & bit)) {
if (time_after(jiffies, timeout)) {
if (!(readl_relaxed(reg) & bit))
return -ETIMEDOUT;
else
return 0;
}
cpu_relax();
}
return 0;
} }
static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi, static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
......
...@@ -143,7 +143,6 @@ struct qcom_qspi { ...@@ -143,7 +143,6 @@ struct qcom_qspi {
struct qspi_xfer xfer; struct qspi_xfer xfer;
struct icc_path *icc_path_cpu_to_qspi; struct icc_path *icc_path_cpu_to_qspi;
struct opp_table *opp_table; struct opp_table *opp_table;
bool has_opp_table;
unsigned long last_speed; unsigned long last_speed;
/* Lock to protect data accessed by IRQs */ /* Lock to protect data accessed by IRQs */
spinlock_t lock; spinlock_t lock;
...@@ -421,9 +420,8 @@ static irqreturn_t qcom_qspi_irq(int irq, void *dev_id) ...@@ -421,9 +420,8 @@ static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
u32 int_status; u32 int_status;
struct qcom_qspi *ctrl = dev_id; struct qcom_qspi *ctrl = dev_id;
irqreturn_t ret = IRQ_NONE; irqreturn_t ret = IRQ_NONE;
unsigned long flags;
spin_lock_irqsave(&ctrl->lock, flags); spin_lock(&ctrl->lock);
int_status = readl(ctrl->base + MSTR_INT_STATUS); int_status = readl(ctrl->base + MSTR_INT_STATUS);
writel(int_status, ctrl->base + MSTR_INT_STATUS); writel(int_status, ctrl->base + MSTR_INT_STATUS);
...@@ -451,7 +449,7 @@ static irqreturn_t qcom_qspi_irq(int irq, void *dev_id) ...@@ -451,7 +449,7 @@ static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev)); spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
} }
spin_unlock_irqrestore(&ctrl->lock, flags); spin_unlock(&ctrl->lock);
return ret; return ret;
} }
...@@ -495,9 +493,8 @@ static int qcom_qspi_probe(struct platform_device *pdev) ...@@ -495,9 +493,8 @@ static int qcom_qspi_probe(struct platform_device *pdev)
ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config"); ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
if (IS_ERR(ctrl->icc_path_cpu_to_qspi)) { if (IS_ERR(ctrl->icc_path_cpu_to_qspi)) {
ret = PTR_ERR(ctrl->icc_path_cpu_to_qspi); ret = dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi),
if (ret != -EPROBE_DEFER) "Failed to get cpu path\n");
dev_err(dev, "Failed to get cpu path: %d\n", ret);
goto exit_probe_master_put; goto exit_probe_master_put;
} }
/* Set BW vote for register access */ /* Set BW vote for register access */
...@@ -546,11 +543,9 @@ static int qcom_qspi_probe(struct platform_device *pdev) ...@@ -546,11 +543,9 @@ static int qcom_qspi_probe(struct platform_device *pdev)
} }
/* OPP table is optional */ /* OPP table is optional */
ret = dev_pm_opp_of_add_table(&pdev->dev); ret = dev_pm_opp_of_add_table(&pdev->dev);
if (!ret) { if (ret && ret != -ENODEV) {
ctrl->has_opp_table = true;
} else if (ret != -ENODEV) {
dev_err(&pdev->dev, "invalid OPP table in device tree\n"); dev_err(&pdev->dev, "invalid OPP table in device tree\n");
goto exit_probe_master_put; goto exit_probe_put_clkname;
} }
pm_runtime_use_autosuspend(dev); pm_runtime_use_autosuspend(dev);
...@@ -562,8 +557,9 @@ static int qcom_qspi_probe(struct platform_device *pdev) ...@@ -562,8 +557,9 @@ static int qcom_qspi_probe(struct platform_device *pdev)
return 0; return 0;
pm_runtime_disable(dev); pm_runtime_disable(dev);
if (ctrl->has_opp_table)
dev_pm_opp_of_remove_table(&pdev->dev); dev_pm_opp_of_remove_table(&pdev->dev);
exit_probe_put_clkname:
dev_pm_opp_put_clkname(ctrl->opp_table); dev_pm_opp_put_clkname(ctrl->opp_table);
exit_probe_master_put: exit_probe_master_put:
...@@ -581,7 +577,6 @@ static int qcom_qspi_remove(struct platform_device *pdev) ...@@ -581,7 +577,6 @@ static int qcom_qspi_remove(struct platform_device *pdev)
spi_unregister_master(master); spi_unregister_master(master);
pm_runtime_disable(&pdev->dev); pm_runtime_disable(&pdev->dev);
if (ctrl->has_opp_table)
dev_pm_opp_of_remove_table(&pdev->dev); dev_pm_opp_of_remove_table(&pdev->dev);
dev_pm_opp_put_clkname(ctrl->opp_table); dev_pm_opp_put_clkname(ctrl->opp_table);
......
...@@ -848,7 +848,7 @@ static int spi_qup_transfer_one(struct spi_master *master, ...@@ -848,7 +848,7 @@ static int spi_qup_transfer_one(struct spi_master *master,
{ {
struct spi_qup *controller = spi_master_get_devdata(master); struct spi_qup *controller = spi_master_get_devdata(master);
unsigned long timeout, flags; unsigned long timeout, flags;
int ret = -EIO; int ret;
ret = spi_qup_io_prep(spi, xfer); ret = spi_qup_io_prep(spi, xfer);
if (ret) if (ret)
......
...@@ -161,6 +161,7 @@ ...@@ -161,6 +161,7 @@
#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */ #define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
#define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */ #define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */
#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */ #define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
#define SPCMD_BRDV(brdv) ((brdv) << 2)
#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */ #define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */ #define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
...@@ -242,24 +243,40 @@ struct spi_ops { ...@@ -242,24 +243,40 @@ struct spi_ops {
int (*transfer_one)(struct spi_controller *ctlr, int (*transfer_one)(struct spi_controller *ctlr,
struct spi_device *spi, struct spi_transfer *xfer); struct spi_device *spi, struct spi_transfer *xfer);
u16 extra_mode_bits; u16 extra_mode_bits;
u16 min_div;
u16 max_div;
u16 flags; u16 flags;
u16 fifo_size; u16 fifo_size;
u8 num_hw_ss; u8 num_hw_ss;
}; };
static void rspi_set_rate(struct rspi_data *rspi)
{
unsigned long clksrc;
int brdv = 0, spbr;
clksrc = clk_get_rate(rspi->clk);
spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1;
while (spbr > 255 && brdv < 3) {
brdv++;
spbr = DIV_ROUND_UP(spbr + 1, 2) - 1;
}
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
rspi->spcmd |= SPCMD_BRDV(brdv);
rspi->speed_hz = DIV_ROUND_UP(clksrc, (2U << brdv) * (spbr + 1));
}
/* /*
* functions for RSPI on legacy SH * functions for RSPI on legacy SH
*/ */
static int rspi_set_config_register(struct rspi_data *rspi, int access_size) static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
{ {
int spbr;
/* Sets output mode, MOSI signal, and (optionally) loopback */ /* Sets output mode, MOSI signal, and (optionally) loopback */
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */ /* Sets transfer bit rate */
spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz) - 1; rspi_set_rate(rspi);
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
/* Disable dummy transmission, set 16-bit word access, 1 frame */ /* Disable dummy transmission, set 16-bit word access, 1 frame */
rspi_write8(rspi, 0, RSPI_SPDCR); rspi_write8(rspi, 0, RSPI_SPDCR);
...@@ -289,25 +306,11 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size) ...@@ -289,25 +306,11 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
*/ */
static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size) static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
{ {
int spbr;
int div = 0;
unsigned long clksrc;
/* Sets output mode, MOSI signal, and (optionally) loopback */ /* Sets output mode, MOSI signal, and (optionally) loopback */
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
clksrc = clk_get_rate(rspi->clk);
while (div < 3) {
if (rspi->speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
break;
div++;
clksrc /= 2;
}
/* Sets transfer bit rate */ /* Sets transfer bit rate */
spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1; rspi_set_rate(rspi);
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
rspi->spcmd |= div << 2;
/* Disable dummy transmission, set byte access */ /* Disable dummy transmission, set byte access */
rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR); rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
...@@ -334,14 +337,28 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size) ...@@ -334,14 +337,28 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
*/ */
static int qspi_set_config_register(struct rspi_data *rspi, int access_size) static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{ {
int spbr; unsigned long clksrc;
int brdv = 0, spbr;
/* Sets output mode, MOSI signal, and (optionally) loopback */ /* Sets output mode, MOSI signal, and (optionally) loopback */
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */ /* Sets transfer bit rate */
spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz); clksrc = clk_get_rate(rspi->clk);
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); if (rspi->speed_hz >= clksrc) {
spbr = 0;
rspi->speed_hz = clksrc;
} else {
spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz);
while (spbr > 255 && brdv < 3) {
brdv++;
spbr = DIV_ROUND_UP(spbr, 2);
}
spbr = clamp(spbr, 0, 255);
rspi->speed_hz = DIV_ROUND_UP(clksrc, (2U << brdv) * spbr);
}
rspi_write8(rspi, spbr, RSPI_SPBR);
rspi->spcmd |= SPCMD_BRDV(brdv);
/* Disable dummy transmission, set byte access */ /* Disable dummy transmission, set byte access */
rspi_write8(rspi, 0, RSPI_SPDCR); rspi_write8(rspi, 0, RSPI_SPDCR);
...@@ -686,6 +703,8 @@ static int rspi_common_transfer(struct rspi_data *rspi, ...@@ -686,6 +703,8 @@ static int rspi_common_transfer(struct rspi_data *rspi,
{ {
int ret; int ret;
xfer->effective_speed_hz = rspi->speed_hz;
ret = rspi_dma_check_then_transfer(rspi, xfer); ret = rspi_dma_check_then_transfer(rspi, xfer);
if (ret != -EAGAIN) if (ret != -EAGAIN)
return ret; return ret;
...@@ -841,6 +860,7 @@ static int qspi_transfer_one(struct spi_controller *ctlr, ...@@ -841,6 +860,7 @@ static int qspi_transfer_one(struct spi_controller *ctlr,
{ {
struct rspi_data *rspi = spi_controller_get_devdata(ctlr); struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
xfer->effective_speed_hz = rspi->speed_hz;
if (spi->mode & SPI_LOOP) { if (spi->mode & SPI_LOOP) {
return qspi_transfer_out_in(rspi, xfer); return qspi_transfer_out_in(rspi, xfer);
} else if (xfer->tx_nbits > SPI_NBITS_SINGLE) { } else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
...@@ -1163,6 +1183,8 @@ static int rspi_remove(struct platform_device *pdev) ...@@ -1163,6 +1183,8 @@ static int rspi_remove(struct platform_device *pdev)
static const struct spi_ops rspi_ops = { static const struct spi_ops rspi_ops = {
.set_config_register = rspi_set_config_register, .set_config_register = rspi_set_config_register,
.transfer_one = rspi_transfer_one, .transfer_one = rspi_transfer_one,
.min_div = 2,
.max_div = 4096,
.flags = SPI_CONTROLLER_MUST_TX, .flags = SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, .fifo_size = 8,
.num_hw_ss = 2, .num_hw_ss = 2,
...@@ -1171,6 +1193,8 @@ static const struct spi_ops rspi_ops = { ...@@ -1171,6 +1193,8 @@ static const struct spi_ops rspi_ops = {
static const struct spi_ops rspi_rz_ops = { static const struct spi_ops rspi_rz_ops = {
.set_config_register = rspi_rz_set_config_register, .set_config_register = rspi_rz_set_config_register,
.transfer_one = rspi_rz_transfer_one, .transfer_one = rspi_rz_transfer_one,
.min_div = 2,
.max_div = 4096,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, /* 8 for TX, 32 for RX */ .fifo_size = 8, /* 8 for TX, 32 for RX */
.num_hw_ss = 1, .num_hw_ss = 1,
...@@ -1181,6 +1205,8 @@ static const struct spi_ops qspi_ops = { ...@@ -1181,6 +1205,8 @@ static const struct spi_ops qspi_ops = {
.transfer_one = qspi_transfer_one, .transfer_one = qspi_transfer_one,
.extra_mode_bits = SPI_TX_DUAL | SPI_TX_QUAD | .extra_mode_bits = SPI_TX_DUAL | SPI_TX_QUAD |
SPI_RX_DUAL | SPI_RX_QUAD, SPI_RX_DUAL | SPI_RX_QUAD,
.min_div = 1,
.max_div = 4080,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 32, .fifo_size = 32,
.num_hw_ss = 1, .num_hw_ss = 1,
...@@ -1242,6 +1268,7 @@ static int rspi_probe(struct platform_device *pdev) ...@@ -1242,6 +1268,7 @@ static int rspi_probe(struct platform_device *pdev)
int ret; int ret;
const struct rspi_plat_data *rspi_pd; const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops; const struct spi_ops *ops;
unsigned long clksrc;
ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data)); ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
if (ctlr == NULL) if (ctlr == NULL)
...@@ -1261,13 +1288,6 @@ static int rspi_probe(struct platform_device *pdev) ...@@ -1261,13 +1288,6 @@ static int rspi_probe(struct platform_device *pdev)
ctlr->num_chipselect = 2; /* default */ ctlr->num_chipselect = 2; /* default */
} }
/* ops parameter check */
if (!ops->set_config_register) {
dev_err(&pdev->dev, "there is no set_config_register\n");
ret = -ENODEV;
goto error1;
}
rspi = spi_controller_get_devdata(ctlr); rspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, rspi); platform_set_drvdata(pdev, rspi);
rspi->ops = ops; rspi->ops = ops;
...@@ -1301,6 +1321,9 @@ static int rspi_probe(struct platform_device *pdev) ...@@ -1301,6 +1321,9 @@ static int rspi_probe(struct platform_device *pdev)
ctlr->unprepare_message = rspi_unprepare_message; ctlr->unprepare_message = rspi_unprepare_message;
ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
SPI_LOOP | ops->extra_mode_bits; SPI_LOOP | ops->extra_mode_bits;
clksrc = clk_get_rate(rspi->clk);
ctlr->min_speed_hz = DIV_ROUND_UP(clksrc, ops->max_div);
ctlr->max_speed_hz = DIV_ROUND_UP(clksrc, ops->min_div);
ctlr->flags = ops->flags; ctlr->flags = ops->flags;
ctlr->dev.of_node = pdev->dev.of_node; ctlr->dev.of_node = pdev->dev.of_node;
ctlr->use_gpio_descriptors = true; ctlr->use_gpio_descriptors = true;
......
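The common rspi_set_rate() walks the BRDV prescaler up whenever the 8-bit SPBR divider would overflow, so the programmed rate is clksrc / (2^(brdv+1) * (spbr+1)); with brdv limited to 0..3 and spbr to 0..255 the total divider spans 2..4096, which is exactly what the new min_div/max_div fields feed into ctlr->min_speed_hz and ctlr->max_speed_hz. A standalone sketch of the arithmetic with worked numbers (not driver code; the QSPI variant rounds slightly differently):

	static unsigned long rspi_rate(unsigned long clksrc, unsigned long speed_hz)
	{
		int brdv = 0;
		int spbr = DIV_ROUND_UP(clksrc, 2 * speed_hz) - 1;

		while (spbr > 255 && brdv < 3) {
			brdv++;
			spbr = DIV_ROUND_UP(spbr + 1, 2) - 1;
		}
		spbr = clamp(spbr, 0, 255);

		/* clksrc = 48 MHz, speed_hz = 100 kHz: spbr = 239, brdv = 0 -> 100 kHz */
		/* clksrc = 48 MHz, speed_hz = 10 kHz: spbr clamps to 255, brdv = 3 -> ~11.7 kHz */
		return DIV_ROUND_UP(clksrc, (2U << brdv) * (spbr + 1));
	}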
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#include "spi-s3c24xx-fiq.h" #include "spi-s3c24xx-fiq.h"
/** /**
* s3c24xx_spi_devstate - per device data * struct s3c24xx_spi_devstate - per device data
* @hz: Last frequency calculated for @sppre field. * @hz: Last frequency calculated for @sppre field.
* @mode: Last mode setting for the @spcon field. * @mode: Last mode setting for the @spcon field.
* @spcon: Value to write to the SPCON register. * @spcon: Value to write to the SPCON register.
......
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
#define S3C64XX_SPI_CH_CFG 0x00 #define S3C64XX_SPI_CH_CFG 0x00
#define S3C64XX_SPI_CLK_CFG 0x04 #define S3C64XX_SPI_CLK_CFG 0x04
#define S3C64XX_SPI_MODE_CFG 0x08 #define S3C64XX_SPI_MODE_CFG 0x08
#define S3C64XX_SPI_SLAVE_SEL 0x0C #define S3C64XX_SPI_CS_REG 0x0C
#define S3C64XX_SPI_INT_EN 0x10 #define S3C64XX_SPI_INT_EN 0x10
#define S3C64XX_SPI_STATUS 0x14 #define S3C64XX_SPI_STATUS 0x14
#define S3C64XX_SPI_TX_DATA 0x18 #define S3C64XX_SPI_TX_DATA 0x18
...@@ -64,9 +64,9 @@ ...@@ -64,9 +64,9 @@
#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1) #define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
#define S3C64XX_SPI_MODE_4BURST (1<<0) #define S3C64XX_SPI_MODE_4BURST (1<<0)
#define S3C64XX_SPI_SLAVE_AUTO (1<<1) #define S3C64XX_SPI_CS_NSC_CNT_2 (2<<4)
#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0) #define S3C64XX_SPI_CS_AUTO (1<<1)
#define S3C64XX_SPI_SLAVE_NSC_CNT_2 (2<<4) #define S3C64XX_SPI_CS_SIG_INACT (1<<0)
#define S3C64XX_SPI_INT_TRAILING_EN (1<<6) #define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5) #define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
...@@ -122,6 +122,7 @@ ...@@ -122,6 +122,7 @@
struct s3c64xx_spi_dma_data { struct s3c64xx_spi_dma_data {
struct dma_chan *ch; struct dma_chan *ch;
dma_cookie_t cookie;
enum dma_transfer_direction direction; enum dma_transfer_direction direction;
}; };
...@@ -161,11 +162,8 @@ struct s3c64xx_spi_port_config { ...@@ -161,11 +162,8 @@ struct s3c64xx_spi_port_config {
* @cntrlr_info: Platform specific data for the controller this driver manages. * @cntrlr_info: Platform specific data for the controller this driver manages.
* @lock: Controller specific lock. * @lock: Controller specific lock.
* @state: Set of FLAGS to indicate status. * @state: Set of FLAGS to indicate status.
* @rx_dmach: Controller's DMA channel for Rx.
* @tx_dmach: Controller's DMA channel for Tx.
* @sfr_start: BUS address of SPI controller regs. * @sfr_start: BUS address of SPI controller regs.
* @regs: Pointer to ioremap'ed controller registers. * @regs: Pointer to ioremap'ed controller registers.
* @irq: interrupt
* @xfer_completion: To indicate completion of xfer task. * @xfer_completion: To indicate completion of xfer task.
* @cur_mode: Stores the active configuration of the controller. * @cur_mode: Stores the active configuration of the controller.
* @cur_bpw: Stores the active bits per word settings. * @cur_bpw: Stores the active bits per word settings.
...@@ -271,12 +269,13 @@ static void s3c64xx_spi_dmacb(void *data) ...@@ -271,12 +269,13 @@ static void s3c64xx_spi_dmacb(void *data)
spin_unlock_irqrestore(&sdd->lock, flags); spin_unlock_irqrestore(&sdd->lock, flags);
} }
static void prepare_dma(struct s3c64xx_spi_dma_data *dma, static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
struct sg_table *sgt) struct sg_table *sgt)
{ {
struct s3c64xx_spi_driver_data *sdd; struct s3c64xx_spi_driver_data *sdd;
struct dma_slave_config config; struct dma_slave_config config;
struct dma_async_tx_descriptor *desc; struct dma_async_tx_descriptor *desc;
int ret;
memset(&config, 0, sizeof(config)); memset(&config, 0, sizeof(config));
...@@ -300,12 +299,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma, ...@@ -300,12 +299,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents, desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
dma->direction, DMA_PREP_INTERRUPT); dma->direction, DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
return -ENOMEM;
}
desc->callback = s3c64xx_spi_dmacb; desc->callback = s3c64xx_spi_dmacb;
desc->callback_param = dma; desc->callback_param = dma;
dmaengine_submit(desc); dma->cookie = dmaengine_submit(desc);
ret = dma_submit_error(dma->cookie);
if (ret) {
dev_err(&sdd->pdev->dev, "DMA submission failed");
return -EIO;
}
dma_async_issue_pending(dma->ch); dma_async_issue_pending(dma->ch);
return 0;
} }
static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable) static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
...@@ -318,18 +329,18 @@ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable) ...@@ -318,18 +329,18 @@ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
if (enable) { if (enable) {
if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) { if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) {
writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL); writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
} else { } else {
u32 ssel = readl(sdd->regs + S3C64XX_SPI_SLAVE_SEL); u32 ssel = readl(sdd->regs + S3C64XX_SPI_CS_REG);
ssel |= (S3C64XX_SPI_SLAVE_AUTO | ssel |= (S3C64XX_SPI_CS_AUTO |
S3C64XX_SPI_SLAVE_NSC_CNT_2); S3C64XX_SPI_CS_NSC_CNT_2);
writel(ssel, sdd->regs + S3C64XX_SPI_SLAVE_SEL); writel(ssel, sdd->regs + S3C64XX_SPI_CS_REG);
} }
} else { } else {
if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
writel(S3C64XX_SPI_SLAVE_SIG_INACT, writel(S3C64XX_SPI_CS_SIG_INACT,
sdd->regs + S3C64XX_SPI_SLAVE_SEL); sdd->regs + S3C64XX_SPI_CS_REG);
} }
} }
...@@ -355,11 +366,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master, ...@@ -355,11 +366,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1; return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
} }
static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer, int dma_mode) struct spi_transfer *xfer, int dma_mode)
{ {
void __iomem *regs = sdd->regs; void __iomem *regs = sdd->regs;
u32 modecfg, chcfg; u32 modecfg, chcfg;
int ret = 0;
modecfg = readl(regs + S3C64XX_SPI_MODE_CFG); modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
...@@ -385,7 +397,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, ...@@ -385,7 +397,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
chcfg |= S3C64XX_SPI_CH_TXCH_ON; chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) { if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
prepare_dma(&sdd->tx_dma, &xfer->tx_sg); ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
} else { } else {
switch (sdd->cur_bpw) { switch (sdd->cur_bpw) {
case 32: case 32:
...@@ -417,12 +429,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, ...@@ -417,12 +429,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN, | S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT); regs + S3C64XX_SPI_PACKET_CNT);
prepare_dma(&sdd->rx_dma, &xfer->rx_sg); ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
} }
} }
if (ret)
return ret;
writel(modecfg, regs + S3C64XX_SPI_MODE_CFG); writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
writel(chcfg, regs + S3C64XX_SPI_CH_CFG); writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
return 0;
} }
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd, static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
...@@ -456,7 +473,8 @@ static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd, ...@@ -456,7 +473,8 @@ static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
/* millisecs to xfer 'len' bytes @ 'cur_speed' */ /* millisecs to xfer 'len' bytes @ 'cur_speed' */
ms = xfer->len * 8 * 1000 / sdd->cur_speed; ms = xfer->len * 8 * 1000 / sdd->cur_speed;
ms += 10; /* some tolerance */ ms += 30; /* some tolerance */
ms = max(ms, 100); /* minimum timeout */
val = msecs_to_jiffies(ms) + 10; val = msecs_to_jiffies(ms) + 10;
val = wait_for_completion_timeout(&sdd->xfer_completion, val); val = wait_for_completion_timeout(&sdd->xfer_completion, val);
...@@ -555,9 +573,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd, ...@@ -555,9 +573,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
return 0; return 0;
} }
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{ {
void __iomem *regs = sdd->regs; void __iomem *regs = sdd->regs;
int ret;
u32 val; u32 val;
/* Disable Clock */ /* Disable Clock */
...@@ -605,7 +624,10 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) ...@@ -605,7 +624,10 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
if (sdd->port_conf->clk_from_cmu) { if (sdd->port_conf->clk_from_cmu) {
/* The src_clk clock is divided internally by 2 */ /* The src_clk clock is divided internally by 2 */
clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
if (ret)
return ret;
sdd->cur_speed = clk_get_rate(sdd->src_clk) / 2;
} else { } else {
/* Configure Clock */ /* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG); val = readl(regs + S3C64XX_SPI_CLK_CFG);
...@@ -619,6 +641,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) ...@@ -619,6 +641,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
val |= S3C64XX_SPI_ENCLK_ENABLE; val |= S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG); writel(val, regs + S3C64XX_SPI_CLK_CFG);
} }
return 0;
} }
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32) #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
...@@ -661,7 +685,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, ...@@ -661,7 +685,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
sdd->cur_bpw = bpw; sdd->cur_bpw = bpw;
sdd->cur_speed = speed; sdd->cur_speed = speed;
sdd->cur_mode = spi->mode; sdd->cur_mode = spi->mode;
s3c64xx_spi_config(sdd); status = s3c64xx_spi_config(sdd);
if (status)
return status;
} }
if (!is_polling(sdd) && (xfer->len > fifo_len) && if (!is_polling(sdd) && (xfer->len > fifo_len) &&
...@@ -685,13 +711,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, ...@@ -685,13 +711,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
sdd->state &= ~RXBUSY; sdd->state &= ~RXBUSY;
sdd->state &= ~TXBUSY; sdd->state &= ~TXBUSY;
s3c64xx_enable_datapath(sdd, xfer, use_dma);
/* Start the signals */ /* Start the signals */
s3c64xx_spi_set_cs(spi, true); s3c64xx_spi_set_cs(spi, true);
status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
spin_unlock_irqrestore(&sdd->lock, flags); spin_unlock_irqrestore(&sdd->lock, flags);
if (status) {
dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
break;
}
if (use_dma) if (use_dma)
status = s3c64xx_wait_for_dma(sdd, xfer); status = s3c64xx_wait_for_dma(sdd, xfer);
else else
...@@ -699,17 +730,28 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, ...@@ -699,17 +730,28 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
if (status) { if (status) {
dev_err(&spi->dev, dev_err(&spi->dev,
"I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n", "I/O Error: rx-%d tx-%d rx-%c tx-%c len-%d dma-%d res-(%d)\n",
xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
(sdd->state & RXBUSY) ? 'f' : 'p', (sdd->state & RXBUSY) ? 'f' : 'p',
(sdd->state & TXBUSY) ? 'f' : 'p', (sdd->state & TXBUSY) ? 'f' : 'p',
xfer->len); xfer->len, use_dma ? 1 : 0, status);
if (use_dma) { if (use_dma) {
if (xfer->tx_buf && (sdd->state & TXBUSY)) struct dma_tx_state s;
if (xfer->tx_buf && (sdd->state & TXBUSY)) {
dmaengine_pause(sdd->tx_dma.ch);
dmaengine_tx_status(sdd->tx_dma.ch, sdd->tx_dma.cookie, &s);
dmaengine_terminate_all(sdd->tx_dma.ch); dmaengine_terminate_all(sdd->tx_dma.ch);
if (xfer->rx_buf && (sdd->state & RXBUSY)) dev_err(&spi->dev, "TX residue: %d\n", s.residue);
}
if (xfer->rx_buf && (sdd->state & RXBUSY)) {
dmaengine_pause(sdd->rx_dma.ch);
dmaengine_tx_status(sdd->rx_dma.ch, sdd->rx_dma.cookie, &s);
dmaengine_terminate_all(sdd->rx_dma.ch); dmaengine_terminate_all(sdd->rx_dma.ch);
dev_err(&spi->dev, "RX residue: %d\n", s.residue);
}
} }
} else { } else {
s3c64xx_flush_fifo(sdd); s3c64xx_flush_fifo(sdd);
...@@ -939,9 +981,9 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd) ...@@ -939,9 +981,9 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
sdd->cur_speed = 0; sdd->cur_speed = 0;
if (sci->no_cs) if (sci->no_cs)
writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL); writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL); writel(S3C64XX_SPI_CS_SIG_INACT, sdd->regs + S3C64XX_SPI_CS_REG);
/* Disable Interrupts - we use Polling if not DMA mode */ /* Disable Interrupts - we use Polling if not DMA mode */
writel(0, regs + S3C64XX_SPI_INT_EN); writel(0, regs + S3C64XX_SPI_INT_EN);
...@@ -1336,6 +1378,10 @@ static int s3c64xx_spi_runtime_resume(struct device *dev) ...@@ -1336,6 +1378,10 @@ static int s3c64xx_spi_runtime_resume(struct device *dev)
s3c64xx_spi_hwinit(sdd); s3c64xx_spi_hwinit(sdd);
writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
sdd->regs + S3C64XX_SPI_INT_EN);
return 0; return 0;
err_disable_src_clk: err_disable_src_clk:
...@@ -1379,6 +1425,7 @@ static struct s3c64xx_spi_port_config exynos4_spi_port_config = { ...@@ -1379,6 +1425,7 @@ static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
.tx_st_done = 25, .tx_st_done = 25,
.high_speed = true, .high_speed = true,
.clk_from_cmu = true, .clk_from_cmu = true,
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
}; };
static struct s3c64xx_spi_port_config exynos7_spi_port_config = { static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
......
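The DMA wait above now pads the theoretical wire time with a larger tolerance and never waits less than 100 ms, which avoids spurious timeouts on short transfers where setup latency dominates. A small sketch of the resulting bound, with illustrative numbers in the comment:

	static unsigned long xfer_timeout_ms(unsigned int len, unsigned int speed_hz)
	{
		unsigned long ms;

		ms = (unsigned long)len * 8 * 1000 / speed_hz;	/* time on the wire */
		ms += 30;					/* scheduling/DMA slack */
		return max(ms, 100UL);				/* floor of 100 ms */
	}

	/* e.g. 256 bytes at 1 MHz is ~2 ms on the wire, but up to 100 ms is allowed */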
...@@ -504,10 +504,7 @@ static int sprd_adi_probe(struct platform_device *pdev) ...@@ -504,10 +504,7 @@ static int sprd_adi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "no hardware spinlock supplied\n"); dev_info(&pdev->dev, "no hardware spinlock supplied\n");
break; break;
default: default:
dev_err(&pdev->dev, dev_err_probe(&pdev->dev, ret, "failed to find hwlock id\n");
"failed to find hwlock id, %d\n", ret);
fallthrough;
case -EPROBE_DEFER:
goto put_ctlr; goto put_ctlr;
} }
} }
......
...@@ -553,22 +553,15 @@ static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t) ...@@ -553,22 +553,15 @@ static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t)
static int sprd_spi_dma_request(struct sprd_spi *ss) static int sprd_spi_dma_request(struct sprd_spi *ss)
{ {
ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn"); ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn");
if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX])) { if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX]))
if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]) == -EPROBE_DEFER) return dev_err_probe(ss->dev, PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]),
return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]); "request RX DMA channel failed!\n");
dev_err(ss->dev, "request RX DMA channel failed!\n");
return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]);
}
ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn"); ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn");
if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) { if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) {
if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]) == -EPROBE_DEFER)
return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]);
dev_err(ss->dev, "request TX DMA channel failed!\n");
dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]); dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]); return dev_err_probe(ss->dev, PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]),
"request TX DMA channel failed!\n");
} }
return 0; return 0;
......
...@@ -491,8 +491,7 @@ static int xilinx_spi_probe(struct platform_device *pdev) ...@@ -491,8 +491,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
goto put_master; goto put_master;
} }
dev_info(&pdev->dev, "at 0x%08llX mapped to 0x%p, irq=%d\n", dev_info(&pdev->dev, "at %pR, irq=%d\n", res, xspi->irq);
(unsigned long long)res->start, xspi->regs, xspi->irq);
if (pdata) { if (pdata) {
for (i = 0; i < pdata->num_devices; i++) for (i = 0; i < pdata->num_devices; i++)
......