Commit 69a06e49 authored by Bjorn Helgaas

Merge branches 'pci/host-aardvark', 'pci/host-altera', 'pci/host-artpec', 'pci/host-designware', 'pci/host-hv', 'pci/host-keystone', 'pci/host-rcar', 'pci/host-rockchip', 'pci/host-tegra' and 'pci/host-xilinx' into next

* pci/host-aardvark:
  PCI: aardvark: Remove redundant dev_err call in advk_pcie_probe()

* pci/host-altera:
  PCI: altera: Remove redundant platform_get_resource() return value check
  PCI: altera: Move retrain from fixup to altera_pcie_host_init()
  PCI: altera: Rework config accessors for use without a struct pci_bus
  PCI: altera: Poll for link training status after retraining the link

* pci/host-artpec:
  PCI: artpec6: Drop __init from artpec6_add_pcie_port()

* pci/host-designware:
  PCI: designware: Remove redundant platform_get_resource() return value check
  PCI: designware: Exchange viewport of `MEMORYs' and `CFGs/IOs'
  PCI: designware: Keep viewport fixed for IO transaction if num_viewport > 2
  PCI: designware: Check LTSSM training bit before deciding link is up
  PCI: designware: Add iATU Unroll feature
  PCI: designware: Wait for iATU enable
  PCI: designware: Move link wait definitions to .c file
  PCI: designware: Return data directly from dw_pcie_readl_rc()

* pci/host-hv:
  PCI: hv: Handle hv_pci_generic_compl() error case
  PCI: hv: Handle vmbus_sendpacket() failure in hv_compose_msi_msg()
  PCI: hv: Remove the unused 'wrk' in struct hv_pcibus_device
  PCI: hv: Use pci_function_description[0] in struct definitions
  PCI: hv: Use zero-length array in struct pci_packet
  PCI: hv: Use list_move_tail() instead of list_del() + list_add_tail()

* pci/host-keystone:
  PCI: keystone: Propagate request_irq() failure

* pci/host-rcar:
  PCI: rcar: Try increasing PCIe link speed to 5 GT/s at boot
  PCI: rcar: Fix some checkpatch warnings
  PCI: rcar: Add multi-MSI support
  PCI: rcar: Don't disable/unprepare clocks on prepare/enable failure
  PCI: rcar: Consolidate register space lookup and ioremap

* pci/host-rockchip:
  PCI: rockchip: Fix wrong transmitted FTS count
  PCI: rockchip: Improve the deassert sequence of four reset pins
  PCI: rockchip: Increase the Max Credit update interval
  PCI: rockchip: Add Rockchip PCIe controller support
  dt-bindings: PCI: rockchip: Add DT bindings for Rockchip PCIe controller

* pci/host-tegra:
  PCI: tegra: Use of_device_get_match_data()
  PCI: tegra: Remove redundant _data suffix

* pci/host-xilinx:
  microblaze/PCI: Add multidomain support for procfs
  PCI: xilinx: Dispose of MSI virtual IRQ
  PCI: xilinx: Clear correct MSI set bit
  PCI: xilinx: Clear interrupt register for invalid interrupt
  PCI: xilinx: Keep both legacy and MSI interrupt domain references
  PCI: xilinx-nwl: Enable all MSI interrupts using MSI mask
  PCI: xilinx-nwl: Expand error logging

Conflicts:
	drivers/pci/host/pcie-xilinx.c
@@ -17,6 +17,8 @@ Required properties:
 - num-lanes: number of lanes to use

 Optional properties:
+- num-viewport: number of view ports configured in hardware. If a platform
+  does not specify it, the driver assumes 2.
 - num-lanes: number of lanes to use (this property should be specified unless
   the link is brought already up in BIOS)
 - reset-gpio: gpio pin number of power good signal
@@ -44,4 +46,5 @@ Example configuration:
 		interrupts = <25>, <24>;
 		#interrupt-cells = <1>;
 		num-lanes = <1>;
+		num-viewport = <3>;
 	};
* Rockchip AXI PCIe Root Port Bridge DT description
Required properties:
- #address-cells: Address representation for root ports, set to <3>
- #size-cells: Size representation for root ports, set to <2>
- #interrupt-cells: specifies the number of cells needed to encode an
interrupt source. The value must be 1.
- compatible: Should contain "rockchip,rk3399-pcie"
- reg: Two register ranges as listed in the reg-names property
- reg-names: Must include the following names
- "axi-base"
- "apb-base"
- clocks: Must contain an entry for each entry in clock-names.
See ../clocks/clock-bindings.txt for details.
- clock-names: Must include the following entries:
- "aclk"
- "aclk-perf"
- "hclk"
- "pm"
- msi-map: Maps a Requester ID to an MSI controller and associated
msi-specifier data. See ./pci-msi.txt
- phys: From PHY bindings: Phandle for the Generic PHY for PCIe.
- phy-names: MUST be "pcie-phy".
- interrupts: Three interrupt entries must be specified.
- interrupt-names: Must include the following names
- "sys"
- "legacy"
- "client"
- resets: Must contain five entries for each entry in reset-names.
See ../reset/reset.txt for details.
- reset-names: Must include the following names
- "core"
- "mgmt"
- "mgmt-sticky"
- "pipe"
- pinctrl-names : The pin control state names
- pinctrl-0: The "default" pinctrl state
- #interrupt-cells: specifies the number of cells needed to encode an
interrupt source. The value must be 1.
- interrupt-map-mask and interrupt-map: standard PCI properties
Optional Property:
- ep-gpios: contain the entry for pre-reset gpio
- num-lanes: number of lanes to use
- vpcie3v3-supply: The phandle to the 3.3v regulator to use for PCIe.
- vpcie1v8-supply: The phandle to the 1.8v regulator to use for PCIe.
- vpcie0v9-supply: The phandle to the 0.9v regulator to use for PCIe.
*Interrupt controller child node*
The core controller provides a single interrupt for legacy INTx. The PCIe node
should contain an interrupt controller node as a target for the PCI
'interrupt-map' property. This node represents the domain at which the four
INTx interrupts are decoded and routed.
Required properties for Interrupt controller child node:
- interrupt-controller: identifies the node as an interrupt controller
- #address-cells: specifies the number of cells needed to encode an
address. The value must be 0.
- #interrupt-cells: specifies the number of cells needed to encode an
interrupt source. The value must be 1.
Example:
pcie0: pcie@f8000000 {
compatible = "rockchip,rk3399-pcie";
#address-cells = <3>;
#size-cells = <2>;
clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>,
<&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>;
clock-names = "aclk", "aclk-perf",
"hclk", "pm";
bus-range = <0x0 0x1>;
interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "sys", "legacy", "client";
assigned-clocks = <&cru SCLK_PCIEPHY_REF>;
assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>;
assigned-clock-rates = <100000000>;
ep-gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>;
ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
num-lanes = <4>;
msi-map = <0x0 &its 0x0 0x1000>;
reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>;
reg-names = "axi-base", "apb-base";
resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
<&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
phys = <&pcie_phy>;
phy-names = "pcie-phy";
pinctrl-names = "default";
pinctrl-0 = <&pcie_clkreq>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie0_intc 0>,
<0 0 0 2 &pcie0_intc 1>,
<0 0 0 3 &pcie0_intc 2>,
<0 0 0 4 &pcie0_intc 3>;
pcie0_intc: interrupt-controller {
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <1>;
};
};
@@ -9083,6 +9083,15 @@ S:	Maintained
 F:	Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
 F:	drivers/pci/host/pcie-hisi.c

+PCIE DRIVER FOR ROCKCHIP
+M:	Shawn Lin <shawn.lin@rock-chips.com>
+M:	Wenrui Li <wenrui.li@rock-chips.com>
+L:	linux-pci@vger.kernel.org
+L:	linux-rockchip@lists.infradead.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/pci/rockchip-pcie.txt
+F:	drivers/pci/host/pcie-rockchip.c
+
 PCIE DRIVER FOR QUALCOMM MSM
 M:	Stanimir Varbanov <svarbanov@mm-sol.com>
 L:	linux-pci@vger.kernel.org
......
@@ -632,10 +632,10 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 	}
 }

-/* Decide whether to display the domain number in /proc */
+/* Display the domain number in /proc */
 int pci_proc_domain(struct pci_bus *bus)
 {
-	return 0;
+	return pci_domain_nr(bus);
 }

 /* This header fixup will do the resource fixup for all devices as they are
......
@@ -274,4 +274,15 @@ config PCIE_ARTPEC6
 	  Say Y here to enable PCIe controller support on Axis ARTPEC-6
 	  SoCs. This PCIe controller uses the DesignWare core.

+config PCIE_ROCKCHIP
+	bool "Rockchip PCIe controller"
+	depends on ARCH_ROCKCHIP
+	depends on OF
+	depends on PCI_MSI_IRQ_DOMAIN
+	select MFD_SYSCON
+	help
+	  Say Y here if you want internal PCI support on Rockchip SoC.
+	  There is 1 internal PCIe port available to support GEN2 with
+	  4 slots.
+
 endmenu
@@ -31,3 +31,4 @@ obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
 obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
 obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
 obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
@@ -927,10 +927,8 @@ static int advk_pcie_probe(struct platform_device *pdev)

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	pcie->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(pcie->base)) {
-		dev_err(&pdev->dev, "Failed to map registers\n");
+	if (IS_ERR(pcie->base))
 		return PTR_ERR(pcie->base);
-	}

 	irq = platform_get_irq(pdev, 0);
 	ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler,
......
@@ -425,12 +425,15 @@ static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
 	exynos_pcie_msi_init(pp);
 }

-static inline void exynos_pcie_readl_rc(struct pcie_port *pp,
-					void __iomem *dbi_base, u32 *val)
+static inline u32 exynos_pcie_readl_rc(struct pcie_port *pp,
+				       void __iomem *dbi_base)
 {
+	u32 val;
+
 	exynos_pcie_sideband_dbi_r_mode(pp, true);
-	*val = readl(dbi_base);
+	val = readl(dbi_base);
 	exynos_pcie_sideband_dbi_r_mode(pp, false);
+	return val;
 }

 static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
......
@@ -200,11 +200,11 @@ struct tran_int_desc {
  */
 struct pci_message {
-	u32 message_type;
+	u32 type;
 } __packed;

 struct pci_child_message {
-	u32 message_type;
+	struct pci_message message_type;
 	union win_slot_encoding wslot;
 } __packed;
@@ -222,7 +222,8 @@ struct pci_packet {
 	void (*completion_func)(void *context, struct pci_response *resp,
 				int resp_packet_size);
 	void *compl_ctxt;
-	struct pci_message message;
+
+	struct pci_message message[0];
 };

 /*
@@ -258,7 +259,7 @@ struct pci_bus_d0_entry {
 struct pci_bus_relations {
 	struct pci_incoming_message incoming;
 	u32 device_count;
-	struct pci_function_description func[1];
+	struct pci_function_description func[0];
 } __packed;

 struct pci_q_res_req_response {
@@ -314,7 +315,7 @@ struct pci_dev_incoming {
 } __packed;

 struct pci_eject_response {
-	u32 message_type;
+	struct pci_message message_type;
 	union win_slot_encoding wslot;
 	u32 status;
 } __packed;
@@ -373,7 +374,6 @@ struct hv_pcibus_device {
 	struct list_head children;
 	struct list_head dr_list;
-	struct work_struct wrk;

 	struct msi_domain_info msi_info;
 	struct msi_controller msi_chip;
@@ -393,7 +393,7 @@ struct hv_dr_work {
 struct hv_dr_state {
 	struct list_head list_entry;
 	u32 device_count;
-	struct pci_function_description func[1];
+	struct pci_function_description func[0];
 };

 enum hv_pcichild_state {
@@ -447,15 +447,16 @@ struct hv_pci_compl {
  * for any message for which the completion packet contains a
  * status and nothing else.
  */
-static
-void
-hv_pci_generic_compl(void *context, struct pci_response *resp,
-		     int resp_packet_size)
+static void hv_pci_generic_compl(void *context, struct pci_response *resp,
+				 int resp_packet_size)
 {
 	struct hv_pci_compl *comp_pkt = context;

 	if (resp_packet_size >= offsetofend(struct pci_response, status))
 		comp_pkt->completion_status = resp->status;
+	else
+		comp_pkt->completion_status = -1;
 	complete(&comp_pkt->host_event);
 }
@@ -694,13 +695,12 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
 	struct pci_delete_interrupt *int_pkt;
 	struct {
 		struct pci_packet pkt;
-		u8 buffer[sizeof(struct pci_delete_interrupt) -
-			  sizeof(struct pci_message)];
+		u8 buffer[sizeof(struct pci_delete_interrupt)];
 	} ctxt;

 	memset(&ctxt, 0, sizeof(ctxt));
 	int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
-	int_pkt->message_type.message_type =
+	int_pkt->message_type.type =
 		PCI_DELETE_INTERRUPT_MESSAGE;
 	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
 	int_pkt->int_desc = *int_desc;
@@ -847,8 +847,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	struct cpumask *affinity;
 	struct {
 		struct pci_packet pkt;
-		u8 buffer[sizeof(struct pci_create_interrupt) -
-			  sizeof(struct pci_message)];
+		u8 buffer[sizeof(struct pci_create_interrupt)];
 	} ctxt;
 	int cpu;
 	int ret;
@@ -876,7 +875,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	ctxt.pkt.completion_func = hv_pci_compose_compl;
 	ctxt.pkt.compl_ctxt = &comp;
 	int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message;
-	int_pkt->message_type.message_type = PCI_CREATE_INTERRUPT_MESSAGE;
+	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
 	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
 	int_pkt->int_desc.vector = cfg->vector;
 	int_pkt->int_desc.vector_count = 1;
@@ -897,7 +896,9 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 			       sizeof(*int_pkt), (unsigned long)&ctxt.pkt,
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-	if (!ret)
-		wait_for_completion(&comp.comp_pkt.host_event);
+	if (ret)
+		goto free_int_desc;
+
+	wait_for_completion(&comp.comp_pkt.host_event);

 	if (comp.comp_pkt.completion_status < 0) {
@@ -1289,7 +1290,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
 	pkt.init_packet.compl_ctxt = &comp_pkt;
 	pkt.init_packet.completion_func = q_resource_requirements;
 	res_req = (struct pci_child_message *)&pkt.init_packet.message;
-	res_req->message_type = PCI_QUERY_RESOURCE_REQUIREMENTS;
+	res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
 	res_req->wslot.slot = desc->win_slot.slot;

 	ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
@@ -1466,8 +1467,7 @@ static void pci_devices_present_work(struct work_struct *work)
 		if (hpdev->reported_missing) {
 			found = true;
 			put_pcichild(hpdev, hv_pcidev_ref_childlist);
-			list_del(&hpdev->list_entry);
-			list_add_tail(&hpdev->list_entry, &removed);
+			list_move_tail(&hpdev->list_entry, &removed);
 			break;
 		}
 	}
@@ -1558,8 +1558,7 @@ static void hv_eject_device_work(struct work_struct *work)
 	int wslot;
 	struct {
 		struct pci_packet pkt;
-		u8 buffer[sizeof(struct pci_eject_response) -
-			  sizeof(struct pci_message)];
+		u8 buffer[sizeof(struct pci_eject_response)];
 	} ctxt;

 	hpdev = container_of(work, struct hv_pci_dev, wrk);
@@ -1585,7 +1584,7 @@ static void hv_eject_device_work(struct work_struct *work)
 	memset(&ctxt, 0, sizeof(ctxt));
 	ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
-	ejct_pkt->message_type = PCI_EJECTION_COMPLETE;
+	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
 	ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
 	vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt,
 			 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
@@ -1688,7 +1687,7 @@ static void hv_pci_onchannelcallback(void *context)
 	case VM_PKT_DATA_INBAND:
 		new_message = (struct pci_incoming_message *)buffer;
-		switch (new_message->message_type.message_type) {
+		switch (new_message->message_type.type) {
 		case PCI_BUS_RELATIONS:
 			bus_rel = (struct pci_bus_relations *)buffer;
@@ -1719,7 +1718,7 @@ static void hv_pci_onchannelcallback(void *context)
 		default:
 			dev_warn(&hbus->hdev->device,
 				 "Unimplemented protocol message %x\n",
-				 new_message->message_type.message_type);
+				 new_message->message_type.type);
 			break;
 		}
 		break;
@@ -1772,7 +1771,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
 	pkt->completion_func = hv_pci_generic_compl;
 	pkt->compl_ctxt = &comp_pkt;
 	version_req = (struct pci_version_request *)&pkt->message;
-	version_req->message_type.message_type = PCI_QUERY_PROTOCOL_VERSION;
+	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
 	version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT;

 	ret = vmbus_sendpacket(hdev->channel, version_req,
@@ -1973,7 +1972,7 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
 	pkt->completion_func = hv_pci_generic_compl;
 	pkt->compl_ctxt = &comp_pkt;
 	d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
-	d0_entry->message_type.message_type = PCI_BUS_D0ENTRY;
+	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
 	d0_entry->mmio_base = hbus->mem_config->start;

 	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
@@ -2019,7 +2018,7 @@ static int hv_pci_query_relations(struct hv_device *hdev)
 		return -ENOTEMPTY;

 	memset(&message, 0, sizeof(message));
-	message.message_type = PCI_QUERY_BUS_RELATIONS;
+	message.type = PCI_QUERY_BUS_RELATIONS;

 	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
 			       0, VM_PKT_DATA_INBAND, 0);
@@ -2072,8 +2071,8 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
 		init_completion(&comp_pkt.host_event);
 		pkt->completion_func = hv_pci_generic_compl;
 		pkt->compl_ctxt = &comp_pkt;
-		pkt->message.message_type = PCI_RESOURCES_ASSIGNED;
 		res_assigned = (struct pci_resources_assigned *)&pkt->message;
+		res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED;
 		res_assigned->wslot.slot = hpdev->desc.win_slot.slot;

 		put_pcichild(hpdev, hv_pcidev_ref_by_slot);
@@ -2123,7 +2122,7 @@ static int hv_send_resources_released(struct hv_device *hdev)
 			continue;

 		memset(&pkt, 0, sizeof(pkt));
-		pkt.message_type = PCI_RESOURCES_RELEASED;
+		pkt.message_type.type = PCI_RESOURCES_RELEASED;
 		pkt.wslot.slot = hpdev->desc.win_slot.slot;

 		put_pcichild(hpdev, hv_pcidev_ref_by_slot);
@@ -2290,7 +2289,7 @@ static int hv_pci_remove(struct hv_device *hdev)
 	init_completion(&comp_pkt.host_event);
 	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
 	pkt.teardown_packet.compl_ctxt = &comp_pkt;
-	pkt.teardown_packet.message.message_type = PCI_BUS_D0EXIT;
+	pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;

 	ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
 			       sizeof(struct pci_message),
......
@@ -334,8 +334,9 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
 	if (ks_pcie->error_irq <= 0)
 		dev_info(&pdev->dev, "no error IRQ defined\n");
 	else {
-		if (request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
-			IRQF_SHARED, "pcie-error-irq", ks_pcie) < 0) {
+		ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
+				  IRQF_SHARED, "pcie-error-irq", ks_pcie);
+		if (ret < 0) {
 			dev_err(&pdev->dev, "failed to request error IRQ %d\n",
 				ks_pcie->error_irq);
 			return ret;
......
@@ -240,7 +240,7 @@ struct tegra_msi {
 };

 /* used to differentiate between Tegra SoC generations */
-struct tegra_pcie_soc_data {
+struct tegra_pcie_soc {
 	unsigned int num_ports;
 	unsigned int msi_base_shift;
 	u32 pads_pll_ctl;
@@ -300,7 +300,7 @@ struct tegra_pcie {
 	struct regulator_bulk_data *supplies;
 	unsigned int num_supplies;

-	const struct tegra_pcie_soc_data *soc_data;
+	const struct tegra_pcie_soc *soc;
 	struct dentry *debugfs;
 };
@@ -542,8 +542,8 @@ static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
 static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
 {
-	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
 	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+	const struct tegra_pcie_soc *soc = port->pcie->soc;
 	unsigned long value;

 	/* enable reference clock */
@@ -562,8 +562,8 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
 static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
 {
-	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
 	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+	const struct tegra_pcie_soc *soc = port->pcie->soc;
 	unsigned long value;

 	/* assert port reset */
@@ -777,7 +777,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
 {
-	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	const struct tegra_pcie_soc *soc = pcie->soc;
 	u32 value;

 	timeout = jiffies + msecs_to_jiffies(timeout);
@@ -793,7 +793,7 @@ static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
 static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
 {
-	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	const struct tegra_pcie_soc *soc = pcie->soc;
 	u32 value;
 	int err;
@@ -848,7 +848,7 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
 static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
 {
-	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	const struct tegra_pcie_soc *soc = pcie->soc;
 	u32 value;

 	/* disable TX/RX data */
@@ -909,7 +909,7 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
 static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
 {
-	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	const struct tegra_pcie_soc *soc = pcie->soc;
 	struct tegra_pcie_port *port;
 	int err;
@@ -977,7 +977,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
 static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
 {
-	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	const struct tegra_pcie_soc *soc = pcie->soc;
 	struct tegra_pcie_port *port;
 	unsigned long value;
 	int err;
@@ -1070,7 +1070,7 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
 static int tegra_pcie_power_on(struct tegra_pcie *pcie)
 {
-	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	const struct tegra_pcie_soc *soc = pcie->soc;
 	int err;

 	reset_control_assert(pcie->pcie_xrst);
@@ -1120,7 +1120,7 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
 {
-	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	const struct tegra_pcie_soc *soc = pcie->soc;

 	pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
 	if (IS_ERR(pcie->pex_clk))
@@ -1237,7 +1237,7 @@ static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
 static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
 {
-	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	const struct tegra_pcie_soc *soc = pcie->soc;
 	struct device_node *np = pcie->dev->of_node;
 	struct tegra_pcie_port *port;
 	int err;
@@ -1489,7 +1489,7 @@ static const struct irq_domain_ops msi_domain_ops = {
 static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
 {
 	struct platform_device *pdev = to_platform_device(pcie->dev);
-	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	const struct tegra_pcie_soc *soc = pcie->soc;
 	struct tegra_msi *msi = &pcie->msi;
 	unsigned long base;
 	int err;
@@ -1802,8 +1802,8 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 {
-	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
 	struct device_node *np = pcie->dev->of_node, *port;
+	const struct tegra_pcie_soc *soc = pcie->soc;
 	struct of_pci_range_parser parser;
 	struct of_pci_range range;
 	u32 lanes = 0, mask = 0;
@@ -2046,7 +2046,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
 	return 0;
 }

-static const struct tegra_pcie_soc_data tegra20_pcie_data = {
+static const struct tegra_pcie_soc tegra20_pcie = {
 	.num_ports = 2,
 	.msi_base_shift = 0,
 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
@@ -2059,7 +2059,7 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = {
 	.has_gen2 = false,
 };

-static const struct tegra_pcie_soc_data tegra30_pcie_data = {
+static const struct tegra_pcie_soc tegra30_pcie = {
 	.num_ports = 3,
 	.msi_base_shift = 8,
 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
@@ -2073,7 +2073,7 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = {
 	.has_gen2 = false,
 };

-static const struct tegra_pcie_soc_data tegra124_pcie_data = {
+static const struct tegra_pcie_soc tegra124_pcie = {
 	.num_ports = 2,
 	.msi_base_shift = 8,
 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
@@ -2087,9 +2087,9 @@ static const struct tegra_pcie_soc_data tegra124_pcie_data = {
 };

 static const struct of_device_id tegra_pcie_of_match[] = {
-	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
-	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
-	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
+	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
+	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
+	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
 	{ },
 };
@@ -2204,21 +2204,16 @@ static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
 static int tegra_pcie_probe(struct platform_device *pdev)
 {
-	const struct of_device_id *match;
 	struct tegra_pcie *pcie;
 	int err;

-	match = of_match_device(tegra_pcie_of_match, &pdev->dev);
-	if (!match)
-		return -ENODEV;
-
 	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;

+	pcie->soc = of_device_get_match_data(&pdev->dev);
 	INIT_LIST_HEAD(&pcie->buses);
 	INIT_LIST_HEAD(&pcie->ports);
-	pcie->soc_data = match->data;
 	pcie->dev = &pdev->dev;

 	err = tegra_pcie_parse_dt(pcie);
......
@@ -241,11 +241,6 @@ static int altera_msi_probe(struct platform_device *pdev)
 	msi->pdev = pdev;

 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
-	if (!res) {
-		dev_err(&pdev->dev, "no csr memory resource defined\n");
-		return -ENODEV;
-	}
-
 	msi->csr_base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(msi->csr_base)) {
 		dev_err(&pdev->dev, "failed to map csr memory\n");
@@ -254,11 +249,6 @@ static int altera_msi_probe(struct platform_device *pdev)

 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 					   "vector_slave");
-	if (!res) {
-		dev_err(&pdev->dev, "no vector_slave memory resource defined\n");
-		return -ENODEV;
-	}
-
 	msi->vector_base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(msi->vector_base)) {
 		dev_err(&pdev->dev, "failed to map vector_slave memory\n");
......
@@ -46,6 +46,7 @@
 #define RP_LTSSM_MASK			0x1f
 #define LTSSM_L0			0xf

+#define PCIE_CAP_OFFSET			0x80
 /* TLP configuration type 0 and 1 */
 #define TLP_FMTTYPE_CFGRD0		0x04	/* Configuration Read Type 0 */
 #define TLP_FMTTYPE_CFGWR0		0x44	/* Configuration Write Type 0 */
@@ -64,7 +65,8 @@
 #define TLP_LOOP			500
 #define RP_DEVFN			0

-#define LINK_UP_TIMEOUT			5000
+#define LINK_UP_TIMEOUT			HZ
+#define LINK_RETRAIN_TIMEOUT		HZ

 #define INTX_NUM			4
@@ -102,38 +104,6 @@ static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
 	return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
 }

-static void altera_pcie_retrain(struct pci_dev *dev)
-{
-	u16 linkcap, linkstat;
-	struct altera_pcie *pcie = dev->bus->sysdata;
-	int timeout = 0;
-
-	if (!altera_pcie_link_is_up(pcie))
-		return;
-
-	/*
-	 * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but
-	 * current speed is 2.5 GB/s.
-	 */
-	pcie_capability_read_word(dev, PCI_EXP_LNKCAP, &linkcap);
-
-	if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
-		return;
-
-	pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat);
-	if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
-		pcie_capability_set_word(dev, PCI_EXP_LNKCTL,
-					 PCI_EXP_LNKCTL_RL);
-		while (!altera_pcie_link_is_up(pcie)) {
-			timeout++;
-			if (timeout > LINK_UP_TIMEOUT)
-				break;
-			udelay(5);
-		}
-	}
-}
-DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain);
-
 /*
  * Altera PCIe port uses BAR0 of RC's configuration space as the translation
  * from PCI bus to native BUS.  Entire DDR region is mapped into PCIe space
@@ -297,22 +267,14 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
 	return PCIBIOS_SUCCESSFUL;
 }

-static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
-				int where, int size, u32 *value)
+static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
+				 unsigned int devfn, int where, int size,
+				 u32 *value)
 {
-	struct altera_pcie *pcie = bus->sysdata;
 	int ret;
 	u32 data;
 	u8 byte_en;

-	if (altera_pcie_hide_rc_bar(bus, devfn, where))
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-
-	if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
-		*value = 0xffffffff;
-		return PCIBIOS_DEVICE_NOT_FOUND;
-	}
-
 	switch (size) {
 	case 1:
 		byte_en = 1 << (where & 3);
@@ -325,7 +287,7 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
 		break;
 	}

-	ret = tlp_cfg_dword_read(pcie, bus->number, devfn,
+	ret = tlp_cfg_dword_read(pcie, busno, devfn,
 				 (where & ~DWORD_MASK), byte_en, &data);
 	if (ret != PCIBIOS_SUCCESSFUL)
 		return ret;
@@ -345,20 +307,14 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
 	return PCIBIOS_SUCCESSFUL;
 }

-static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
-				 int where, int size, u32 value)
+static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
+				  unsigned int devfn, int where, int size,
+				  u32 value)
 {
-	struct altera_pcie *pcie = bus->sysdata;
 	u32 data32;
 	u32 shift = 8 * (where & 3);
 	u8 byte_en;

-	if (altera_pcie_hide_rc_bar(bus, devfn, where))
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-
-	if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
-		return PCIBIOS_DEVICE_NOT_FOUND;
-
 	switch (size) {
 	case 1:
 		data32 = (value & 0xff) << shift;
@@ -374,8 +330,40 @@ static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
 		break;
 	}

-	return tlp_cfg_dword_write(pcie, bus->number, devfn,
-				 (where & ~DWORD_MASK), byte_en, data32);
+	return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK),
+				   byte_en, data32);
+}
+
+static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
+				int where, int size, u32 *value)
+{
+	struct altera_pcie *pcie = bus->sysdata;
+
+	if (altera_pcie_hide_rc_bar(bus, devfn, where))
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
+		*value = 0xffffffff;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size,
+				     value);
+}
+
+static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
+				 int where, int size, u32 value)
+{
+	struct altera_pcie *pcie = bus->sysdata;
+
+	if (altera_pcie_hide_rc_bar(bus, devfn, where))
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
+				      value);
 }

 static struct pci_ops altera_pcie_ops = {
@@ -383,6 +371,90 @@ static struct pci_ops altera_pcie_ops = {
 	.write = altera_pcie_cfg_write,
 };
static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno,
unsigned int devfn, int offset, u16 *value)
{
u32 data;
int ret;
ret = _altera_pcie_cfg_read(pcie, busno, devfn,
PCIE_CAP_OFFSET + offset, sizeof(*value),
&data);
*value = data;
return ret;
}
static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
unsigned int devfn, int offset, u16 value)
{
return _altera_pcie_cfg_write(pcie, busno, devfn,
PCIE_CAP_OFFSET + offset, sizeof(value),
value);
}
static void altera_wait_link_retrain(struct altera_pcie *pcie)
{
u16 reg16;
unsigned long start_jiffies;
/* Wait for link training end. */
start_jiffies = jiffies;
for (;;) {
altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_LT))
break;
if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
dev_err(&pcie->pdev->dev, "link retrain timeout\n");
break;
}
udelay(100);
}
/* Wait for link is up */
start_jiffies = jiffies;
for (;;) {
if (altera_pcie_link_is_up(pcie))
break;
if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
dev_err(&pcie->pdev->dev, "link up timeout\n");
break;
}
udelay(100);
}
}
static void altera_pcie_retrain(struct altera_pcie *pcie)
{
u16 linkcap, linkstat, linkctl;
if (!altera_pcie_link_is_up(pcie))
return;
/*
* Set the retrain bit if the PCIe rootport support > 2.5GB/s, but
* current speed is 2.5 GB/s.
*/
altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP,
&linkcap);
if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
return;
altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA,
&linkstat);
if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
PCI_EXP_LNKCTL, &linkctl);
linkctl |= PCI_EXP_LNKCTL_RL;
altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
PCI_EXP_LNKCTL, linkctl);
altera_wait_link_retrain(pcie);
}
}
 static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
 				irq_hw_number_t hwirq)
 {
@@ -504,6 +576,11 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
 	return 0;
 }

+static void altera_pcie_host_init(struct altera_pcie *pcie)
+{
+	altera_pcie_retrain(pcie);
+}
+
 static int altera_pcie_probe(struct platform_device *pdev)
 {
 	struct altera_pcie *pcie;
@@ -541,6 +618,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
 	cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
 	/* enable all interrupts */
 	cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+	altera_pcie_host_init(pcie);

 	bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops,
 				pcie, &pcie->resources);
......
@@ -191,7 +191,7 @@ static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg)
 	return dw_handle_msi_irq(pp);
 }

-static int __init artpec6_add_pcie_port(struct pcie_port *pp,
+static int artpec6_add_pcie_port(struct pcie_port *pp,
 				 struct platform_device *pdev)
 {
 	int ret;
......
@@ -100,9 +100,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
 	pp->dev = &pdev->dev;

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	dw_plat_pcie->mem_base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(dw_plat_pcie->mem_base))
 		return PTR_ERR(dw_plat_pcie->mem_base);
......
@@ -25,7 +25,17 @@
 #include "pcie-designware.h"

-/* Synopsis specific PCIE configuration registers */
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES		10
+#define LINK_WAIT_USLEEP_MIN		90000
+#define LINK_WAIT_USLEEP_MAX		100000
+
+/* Parameters for the waiting for iATU enabled routine */
+#define LINK_WAIT_MAX_IATU_RETRIES	5
+#define LINK_WAIT_IATU_MIN		9000
+#define LINK_WAIT_IATU_MAX		10000
+
+/* Synopsys-specific PCIe configuration registers */
 #define PCIE_PORT_LINK_CONTROL		0x710
 #define PORT_LINK_MODE_MASK		(0x3f << 16)
 #define PORT_LINK_MODE_1_LANES		(0x1 << 16)
@@ -50,6 +60,7 @@
 #define PCIE_ATU_VIEWPORT		0x900
 #define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
 #define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
+#define PCIE_ATU_REGION_INDEX2		(0x2 << 0)
 #define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
 #define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
 #define PCIE_ATU_CR1			0x904
@@ -69,10 +80,26 @@
 #define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
 #define PCIE_ATU_UPPER_TARGET		0x91C
/*
* iATU Unroll-specific register definitions
* From 4.80 core version the address translation will be made by unroll
*/
#define PCIE_ATU_UNR_REGION_CTRL1 0x00
#define PCIE_ATU_UNR_REGION_CTRL2 0x04
#define PCIE_ATU_UNR_LOWER_BASE 0x08
#define PCIE_ATU_UNR_UPPER_BASE 0x0C
#define PCIE_ATU_UNR_LIMIT 0x10
#define PCIE_ATU_UNR_LOWER_TARGET 0x14
#define PCIE_ATU_UNR_UPPER_TARGET 0x18
/* Register address builder */
#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) ((0x3 << 20) | (region << 9))
 /* PCIe Port Logic registers */
 #define PLR_OFFSET			0x700
 #define PCIE_PHY_DEBUG_R1		(PLR_OFFSET + 0x2c)
-#define PCIE_PHY_DEBUG_R1_LINK_UP	0x00000010
+#define PCIE_PHY_DEBUG_R1_LINK_UP	(0x1 << 4)
+#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING	(0x1 << 29)

 static struct pci_ops dw_pcie_ops;
@@ -114,12 +141,12 @@ int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
 	return PCIBIOS_SUCCESSFUL;
 }

-static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val)
+static inline u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
 {
 	if (pp->ops->readl_rc)
-		pp->ops->readl_rc(pp, pp->dbi_base + reg, val);
-	else
-		*val = readl(pp->dbi_base + reg);
+		return pp->ops->readl_rc(pp, pp->dbi_base + reg);
+
+	return readl(pp->dbi_base + reg);
 }

 static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
@@ -130,6 +157,27 @@ static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
 		writel(val, pp->dbi_base + reg);
 }
static inline u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
if (pp->ops->readl_rc)
return pp->ops->readl_rc(pp, pp->dbi_base + offset + reg);
return readl(pp->dbi_base + offset + reg);
}
static inline void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index,
u32 val, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
if (pp->ops->writel_rc)
pp->ops->writel_rc(pp, val, pp->dbi_base + offset + reg);
else
writel(val, pp->dbi_base + offset + reg);
}
 static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
 			       u32 *val)
 {
@@ -151,24 +199,57 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
 static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
 		int type, u64 cpu_addr, u64 pci_addr, u32 size)
 {
-	u32 val;
+	u32 retries, val;

-	dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
-			  PCIE_ATU_VIEWPORT);
-	dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE);
-	dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE);
-	dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
-			  PCIE_ATU_LIMIT);
-	dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET);
-	dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET);
-	dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
-	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+	if (pp->iatu_unroll_enabled) {
+		dw_pcie_writel_unroll(pp, index,
+			lower_32_bits(cpu_addr), PCIE_ATU_UNR_LOWER_BASE);
+		dw_pcie_writel_unroll(pp, index,
+			upper_32_bits(cpu_addr), PCIE_ATU_UNR_UPPER_BASE);
+		dw_pcie_writel_unroll(pp, index,
+			lower_32_bits(cpu_addr + size - 1), PCIE_ATU_UNR_LIMIT);
+		dw_pcie_writel_unroll(pp, index,
+			lower_32_bits(pci_addr), PCIE_ATU_UNR_LOWER_TARGET);
+		dw_pcie_writel_unroll(pp, index,
+			upper_32_bits(pci_addr), PCIE_ATU_UNR_UPPER_TARGET);
+		dw_pcie_writel_unroll(pp, index,
+			type, PCIE_ATU_UNR_REGION_CTRL1);
+		dw_pcie_writel_unroll(pp, index,
+			PCIE_ATU_ENABLE, PCIE_ATU_UNR_REGION_CTRL2);
+	} else {
+		dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
+				  PCIE_ATU_VIEWPORT);
+		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr),
+				  PCIE_ATU_LOWER_BASE);
+		dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr),
+				  PCIE_ATU_UPPER_BASE);
+		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
+				  PCIE_ATU_LIMIT);
+		dw_pcie_writel_rc(pp, lower_32_bits(pci_addr),
+				  PCIE_ATU_LOWER_TARGET);
+		dw_pcie_writel_rc(pp, upper_32_bits(pci_addr),
+				  PCIE_ATU_UPPER_TARGET);
+		dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
+		dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+	}

 	/*
 	 * Make sure ATU enable takes effect before any subsequent config
 	 * and I/O accesses.
 	 */
-	dw_pcie_readl_rc(pp, PCIE_ATU_CR2, &val);
+	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+		if (pp->iatu_unroll_enabled)
+			val = dw_pcie_readl_unroll(pp, index,
+						   PCIE_ATU_UNR_REGION_CTRL2);
+		else
+			val = dw_pcie_readl_rc(pp, PCIE_ATU_CR2);
+
+		if (val == PCIE_ATU_ENABLE)
+			return;
+
+		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+	}
+	dev_err(pp->dev, "iATU is not being enabled\n");
 }

 static struct irq_chip dw_msi_irq_chip = {
@@ -411,7 +492,8 @@ int dw_pcie_link_up(struct pcie_port *pp)
 		return pp->ops->link_up(pp);

 	val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
-	return val & PCIE_PHY_DEBUG_R1_LINK_UP;
+	return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
+		(!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
 }

 static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
@@ -427,6 +509,17 @@ static const struct irq_domain_ops msi_domain_ops = {
 	.map = dw_pcie_msi_map,
 };
static u8 dw_pcie_iatu_unroll_enabled(struct pcie_port *pp)
{
u32 val;
val = dw_pcie_readl_rc(pp, PCIE_ATU_VIEWPORT);
if (val == 0xffffffff)
return 1;
return 0;
}
 int dw_pcie_host_init(struct pcie_port *pp)
 {
 	struct device_node *np = pp->dev->of_node;
...@@ -526,6 +619,10 @@ int dw_pcie_host_init(struct pcie_port *pp) ...@@ -526,6 +619,10 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret) if (ret)
pp->lanes = 0; pp->lanes = 0;
ret = of_property_read_u32(np, "num-viewport", &pp->num_viewport);
if (ret)
pp->num_viewport = 2;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
if (!pp->ops->msi_host_init) {
pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
...@@ -546,6 +643,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
if (pp->ops->host_init)
pp->ops->host_init(pp);
...@@ -611,11 +710,12 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
va_cfg_base = pp->va_cfg1_base;
}
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
type, cpu_addr,
busdev, cfg_size);
ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
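/*
* With only two viewports, viewport 1 is time-shared between config and
* I/O accesses, so the I/O mapping is restored once the access completes.
*/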
if (pp->num_viewport <= 2)
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
PCIE_ATU_TYPE_IO, pp->io_base,
pp->io_bus_addr, pp->io_size);
...@@ -648,11 +748,12 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
va_cfg_base = pp->va_cfg1_base;
}
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
type, cpu_addr,
busdev, cfg_size);
ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
if (pp->num_viewport <= 2)
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
PCIE_ATU_TYPE_IO, pp->io_base,
pp->io_bus_addr, pp->io_size);
...@@ -715,7 +816,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
u32 val;
/* set the number of lanes */
val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_MODE_MASK;
switch (pp->lanes) {
case 1:
...@@ -737,7 +838,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
/* set link width speed control register */
val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
switch (pp->lanes) {
case 1:
...@@ -760,19 +861,19 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
/* setup interrupt pins */
val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
val &= 0xffff00ff;
val |= 0x00000100;
dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);
/* setup bus numbers */
val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
val &= 0xff000000;
val |= 0x00010100;
dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
/* setup command register */
val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
...@@ -783,10 +884,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
* uses its own address translation component rather than ATU, so
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf) {
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
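/*
* When more than two viewports are available, a third one can stay
* permanently mapped for I/O instead of being reprogrammed on demand
* by the config accessors above.
*/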
if (pp->num_viewport > 2)
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX2,
PCIE_ATU_TYPE_IO, pp->io_base,
pp->io_bus_addr, pp->io_size);
}
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
......
...@@ -22,11 +22,6 @@
#define MAX_MSI_IRQS 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000
struct pcie_port {
struct device *dev;
u8 root_bus_nr;
...@@ -49,16 +44,17 @@ struct pcie_port {
struct resource *busn;
int irq;
u32 lanes;
u32 num_viewport;
struct pcie_host_ops *ops;
int msi_irq;
struct irq_domain *irq_domain;
unsigned long msi_data;
u8 iatu_unroll_enabled;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
struct pcie_host_ops {
u32 (*readl_rc)(struct pcie_port *pp, void __iomem *dbi_base);
void (*writel_rc)(struct pcie_port *pp,
u32 val, void __iomem *dbi_base);
int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
......
...@@ -84,8 +84,18 @@
#define IDSETR1 0x011004
#define TLCTLR 0x011048
#define MACSR 0x011054
#define SPCHGFIN (1 << 4)
#define SPCHGFAIL (1 << 6)
#define SPCHGSUC (1 << 7)
#define LINK_SPEED (0xf << 16)
#define LINK_SPEED_2_5GTS (1 << 16)
#define LINK_SPEED_5_0GTS (2 << 16)
#define MACCTLR 0x011058
#define SPEED_CHANGE (1 << 24)
#define SCRAMBLE_DISABLE (1 << 27)
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
#define SPCNGRSN (1 << 31)
/* R-Car H1 PHY */
#define H1_PCIEPHYADRR 0x04000c
...@@ -385,11 +395,67 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
return 1;
}
static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
{
unsigned int timeout = 1000;
u32 macsr;
if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
return;
if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
dev_err(pcie->dev, "Speed change already in progress\n");
return;
}
macsr = rcar_pci_read_reg(pcie, MACSR);
if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
goto done;
/* Set target link speed to 5.0 GT/s */
rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
PCI_EXP_LNKSTA_CLS_5_0GB);
/* Set speed change reason as intentional factor */
rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
rcar_pci_write_reg(pcie, macsr, MACSR);
/* Start link speed change */
rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
while (timeout--) {
macsr = rcar_pci_read_reg(pcie, MACSR);
if (macsr & SPCHGFIN) {
/* Clear the interrupt bits */
rcar_pci_write_reg(pcie, macsr, MACSR);
if (macsr & SPCHGFAIL)
dev_err(pcie->dev, "Speed change failed\n");
goto done;
}
msleep(1);
};
dev_err(pcie->dev, "Speed change timed out\n");
done:
dev_info(pcie->dev, "Current link speed is %s GT/s\n",
(macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
}
static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
struct pci_bus *bus, *child;
LIST_HEAD(res);
/* Try setting 5 GT/s link speed */
rcar_pcie_force_speedup(pcie);
rcar_pcie_setup(&res, pcie);
pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
...@@ -608,6 +674,18 @@ static int rcar_msi_alloc(struct rcar_msi *chip)
return msi;
}
static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
{
int msi;
mutex_lock(&chip->lock);
msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
order_base_2(no_irqs));
mutex_unlock(&chip->lock);
return msi;
}
static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
{
mutex_lock(&chip->lock);
...@@ -665,7 +743,7 @@ static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
if (hwirq < 0)
return hwirq;
irq = irq_find_mapping(msi->domain, hwirq);
if (!irq) {
rcar_msi_free(msi, hwirq);
return -EINVAL;
...@@ -682,6 +760,58 @@ static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
return 0;
}
static int rcar_msi_setup_irqs(struct msi_controller *chip,
struct pci_dev *pdev, int nvec, int type)
{
struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
struct rcar_msi *msi = to_rcar_msi(chip);
struct msi_desc *desc;
struct msi_msg msg;
unsigned int irq;
int hwirq;
int i;
/* MSI-X interrupts are not supported */
if (type == PCI_CAP_ID_MSIX)
return -EINVAL;
WARN_ON(!list_is_singular(&pdev->dev.msi_list));
desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
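/*
* Multi-MSI vectors must be allocated as a power-of-two sized, naturally
* aligned block; rcar_msi_alloc_region() reserves such a block from the
* bitmap.
*/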
hwirq = rcar_msi_alloc_region(msi, nvec);
if (hwirq < 0)
return -ENOSPC;
irq = irq_find_mapping(msi->domain, hwirq);
if (!irq)
return -ENOSPC;
for (i = 0; i < nvec; i++) {
/*
* irq_create_mapping() called from rcar_pcie_probe() pre-
* allocates descs, so there is no need to allocate descs here.
* We can therefore assume that if irq_find_mapping() above
* returns non-zero, then the descs are also successfully
* allocated.
*/
if (irq_set_msi_desc_off(irq, i, desc)) {
/* TODO: clear */
return -EINVAL;
}
}
desc->nvec_used = nvec;
desc->msi_attrib.multiple = order_base_2(nvec);
msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
msg.data = hwirq;
pci_write_msi_msg(irq, &msg);
return 0;
}
static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
struct rcar_msi *msi = to_rcar_msi(chip);
...@@ -716,12 +846,13 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
struct platform_device *pdev = to_platform_device(pcie->dev);
struct rcar_msi *msi = &pcie->msi;
unsigned long base;
int err, i;
mutex_init(&msi->lock);
msi->chip.dev = pcie->dev;
msi->chip.setup_irq = rcar_msi_setup_irq;
msi->chip.setup_irqs = rcar_msi_setup_irqs;
msi->chip.teardown_irq = rcar_msi_teardown_irq;
msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
...@@ -731,6 +862,9 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
return -ENOMEM;
}
for (i = 0; i < INT_PCI_MSI_NR; i++)
irq_create_mapping(msi->domain, i);
/* Two irqs are for MSI, but they are also used for non-MSI irqs */
err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
...@@ -775,6 +909,10 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
if (err)
return err;
pcie->base = devm_ioremap_resource(&pdev->dev, &res);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
pcie->clk = devm_clk_get(&pdev->dev, "pcie");
if (IS_ERR(pcie->clk)) {
dev_err(pcie->dev, "cannot get platform clock\n");
...@@ -782,7 +920,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
}
err = clk_prepare_enable(pcie->clk);
if (err)
return err;
pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
if (IS_ERR(pcie->bus_clk)) {
...@@ -792,7 +930,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
}
err = clk_prepare_enable(pcie->bus_clk);
if (err)
goto fail_clk;
i = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (!i) {
...@@ -810,12 +948,6 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
}
pcie->msi.irq2 = i;
pcie->base = devm_ioremap_resource(&pdev->dev, &res);
if (IS_ERR(pcie->base)) {
err = PTR_ERR(pcie->base);
goto err_map_reg;
}
return 0;
err_map_reg:
...@@ -865,12 +997,16 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
* Set up 64-bit inbound regions as the range parser doesn't
* distinguish between 32 and 64-bit types.
*/
rcar_pci_write_reg(pcie, lower_32_bits(pci_addr),
PCIEPRAR(idx));
rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags,
PCIELAMR(idx));
rcar_pci_write_reg(pcie, upper_32_bits(pci_addr),
PCIEPRAR(idx + 1));
rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr),
PCIELAR(idx + 1));
rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
pci_addr += size;
...@@ -919,6 +1055,7 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
/* Get the dma-ranges from DT */
for_each_of_pci_range(&parser, &range) {
u64 end = range.cpu_addr + range.size - 1;
dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
range.flags, range.cpu_addr, end, range.pci_addr);
...@@ -932,9 +1069,12 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
static const struct of_device_id rcar_pcie_of_match[] = {
{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
{ .compatible = "renesas,pcie-rcar-gen2",
.data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7790",
.data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7791",
.data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
{},
};
...@@ -947,7 +1087,8 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
resource_size_t iobase;
struct resource_entry *win, *tmp;
err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
&iobase);
if (err)
return err;
......
/*
* Rockchip AXI PCIe host controller driver
*
* Copyright (c) 2016 Rockchip, Inc.
*
* Author: Shawn Lin <shawn.lin@rock-chips.com>
* Wenrui Li <wenrui.li@rock-chips.com>
*
* Bits taken from Synopsys Designware Host controller driver and
* ARM PCI Host generic driver.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/regmap.h>
/*
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
* bits. This allows atomic updates of the register without locking.
*/
#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
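/* e.g. HIWORD_UPDATE_BIT(0x0002) == 0x00020002: bit 17 enables the write
* and bit 1 carries the new value; ENCODE_LANES(4) places the value 2 in
* bits [5:4].
*/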
#define PCIE_CLIENT_BASE 0x0
#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c)
#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50)
#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5)
#define PCIE_CLIENT_INTR_SHIFT 5
#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15)
#define PCIE_CLIENT_INT_MSG BIT(14)
#define PCIE_CLIENT_INT_HOT_RST BIT(13)
#define PCIE_CLIENT_INT_DPA BIT(12)
#define PCIE_CLIENT_INT_FATAL_ERR BIT(11)
#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10)
#define PCIE_CLIENT_INT_CORR_ERR BIT(9)
#define PCIE_CLIENT_INT_INTD BIT(8)
#define PCIE_CLIENT_INT_INTC BIT(7)
#define PCIE_CLIENT_INT_INTB BIT(6)
#define PCIE_CLIENT_INT_INTA BIT(5)
#define PCIE_CLIENT_INT_LOCAL BIT(4)
#define PCIE_CLIENT_INT_UDMA BIT(3)
#define PCIE_CLIENT_INT_PHY BIT(2)
#define PCIE_CLIENT_INT_HOT_PLUG BIT(1)
#define PCIE_CLIENT_INT_PWR_STCG BIT(0)
#define PCIE_CLIENT_INT_LEGACY \
(PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
#define PCIE_CLIENT_INT_CLI \
(PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
PCIE_CLIENT_INT_PHY)
#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
#define PCIE_CORE_PL_CONF_LANE_SHIFT 1
#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004)
#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8)
#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8
#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff
#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020)
#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000
#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16
#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
(((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
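/* The update interval is presumably programmed in units of 8 ns, hence the
* divide-by-8 before shifting into the MUI field (24000 ns -> 3000).
*/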
#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
#define PCIE_CORE_INT_PRFPE BIT(0)
#define PCIE_CORE_INT_CRFPE BIT(1)
#define PCIE_CORE_INT_RRPE BIT(2)
#define PCIE_CORE_INT_PRFO BIT(3)
#define PCIE_CORE_INT_CRFO BIT(4)
#define PCIE_CORE_INT_RT BIT(5)
#define PCIE_CORE_INT_RTR BIT(6)
#define PCIE_CORE_INT_PE BIT(7)
#define PCIE_CORE_INT_MTR BIT(8)
#define PCIE_CORE_INT_UCR BIT(9)
#define PCIE_CORE_INT_FCE BIT(10)
#define PCIE_CORE_INT_CT BIT(11)
#define PCIE_CORE_INT_UTC BIT(18)
#define PCIE_CORE_INT_MMVC BIT(19)
#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210)
#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300)
#define PCIE_CORE_INT \
(PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
PCIE_CORE_INT_MMVC)
#define PCIE_RC_CONFIG_BASE 0xa00000
#define PCIE_RC_CONFIG_VENDOR (PCIE_RC_CONFIG_BASE + 0x00)
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
#define PCIE_RC_CONFIG_SCC_SHIFT 16
#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
#define PCIE_RC_CONFIG_LCS_RETRAIN_LINK BIT(5)
#define PCIE_RC_CONFIG_LCS_LBMIE BIT(10)
#define PCIE_RC_CONFIG_LCS_LABIE BIT(11)
#define PCIE_RC_CONFIG_LCS_LBMS BIT(30)
#define PCIE_RC_CONFIG_LCS_LAMS BIT(31)
#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
#define PCIE_CORE_AXI_CONF_BASE 0xc00000
#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00
#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4)
#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8)
#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc)
#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800
#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0)
#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f
#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00
#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4)
/* Size of one AXI Region (not Region 0) */
#define AXI_REGION_SIZE BIT(20)
/* Size of Region 0, equal to sum of sizes of other regions */
#define AXI_REGION_0_SIZE (32 * (0x1 << 20))
#define OB_REG_SIZE_SHIFT 5
#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
#define AXI_WRAPPER_IO_WRITE 0x6
#define AXI_WRAPPER_MEM_WRITE 0x2
#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
#define MIN_AXI_ADDR_BITS_PASSED 8
#define ROCKCHIP_VENDOR_ID 0x1d87
#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20)
#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15)
#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12)
#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0)
#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
(PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
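/* e.g. PCIE_ECAM_ADDR(1, 0, 0, PCI_VENDOR_ID) == 0x00100000: bus 1,
* device 0, function 0, register 0.
*/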
#define RC_REGION_0_ADDR_TRANS_H 0x00000000
#define RC_REGION_0_ADDR_TRANS_L 0x00000000
#define RC_REGION_0_PASS_BITS (25 - 1)
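/* The pass-bits field is encoded as (bits - 1); 25 - 1 passes the low 25
* address bits through, i.e. region 0 decodes a 32 MB window.
*/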
#define MAX_AXI_WRAPPER_REGION_NUM 33
struct rockchip_pcie {
void __iomem *reg_base; /* DT axi-base */
void __iomem *apb_base; /* DT apb-base */
struct phy *phy;
struct reset_control *core_rst;
struct reset_control *mgmt_rst;
struct reset_control *mgmt_sticky_rst;
struct reset_control *pipe_rst;
struct clk *aclk_pcie;
struct clk *aclk_perf_pcie;
struct clk *hclk_pcie;
struct clk *clk_pcie_pm;
struct regulator *vpcie3v3; /* 3.3V power supply */
struct regulator *vpcie1v8; /* 1.8V power supply */
struct regulator *vpcie0v9; /* 0.9V power supply */
struct gpio_desc *ep_gpio;
u32 lanes;
u8 root_bus_nr;
struct device *dev;
struct irq_domain *irq_domain;
};
static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
{
return readl(rockchip->apb_base + reg);
}
static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
u32 reg)
{
writel(val, rockchip->apb_base + reg);
}
static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
{
u32 status;
status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
status |= (PCIE_RC_CONFIG_LCS_LBMIE | PCIE_RC_CONFIG_LCS_LABIE);
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
}
static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
{
u32 status;
status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
status |= (PCIE_RC_CONFIG_LCS_LBMS | PCIE_RC_CONFIG_LCS_LAMS);
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
}
static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
{
u32 val;
/* Update Tx credit maximum update interval */
val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
}
static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
struct pci_bus *bus, int dev)
{
/* access only one slot on each root port */
if (bus->number == rockchip->root_bus_nr && dev > 0)
return 0;
/*
* do not read more than one device on the bus directly attached
* to RC's downstream side.
*/
if (bus->primary == rockchip->root_bus_nr && dev > 0)
return 0;
return 1;
}
static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
int where, int size, u32 *val)
{
void __iomem *addr = rockchip->apb_base + PCIE_RC_CONFIG_BASE + where;
if (!IS_ALIGNED((uintptr_t)addr, size)) {
*val = 0;
return PCIBIOS_BAD_REGISTER_NUMBER;
}
if (size == 4) {
*val = readl(addr);
} else if (size == 2) {
*val = readw(addr);
} else if (size == 1) {
*val = readb(addr);
} else {
*val = 0;
return PCIBIOS_BAD_REGISTER_NUMBER;
}
return PCIBIOS_SUCCESSFUL;
}
static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
int where, int size, u32 val)
{
u32 mask, tmp, offset;
offset = where & ~0x3;
if (size == 4) {
writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
return PCIBIOS_SUCCESSFUL;
}
mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
/*
* N.B. This read/modify/write isn't safe in general because it can
* corrupt RW1C bits in adjacent registers. But the hardware
* doesn't support smaller writes.
*/
tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask;
tmp |= val << ((where & 0x3) * 8);
writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
return PCIBIOS_SUCCESSFUL;
}
static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
u32 busdev;
busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), where);
if (!IS_ALIGNED(busdev, size)) {
*val = 0;
return PCIBIOS_BAD_REGISTER_NUMBER;
}
if (size == 4) {
*val = readl(rockchip->reg_base + busdev);
} else if (size == 2) {
*val = readw(rockchip->reg_base + busdev);
} else if (size == 1) {
*val = readb(rockchip->reg_base + busdev);
} else {
*val = 0;
return PCIBIOS_BAD_REGISTER_NUMBER;
}
return PCIBIOS_SUCCESSFUL;
}
static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
u32 busdev;
busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), where);
if (!IS_ALIGNED(busdev, size))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (size == 4)
writel(val, rockchip->reg_base + busdev);
else if (size == 2)
writew(val, rockchip->reg_base + busdev);
else if (size == 1)
writeb(val, rockchip->reg_base + busdev);
else
return PCIBIOS_BAD_REGISTER_NUMBER;
return PCIBIOS_SUCCESSFUL;
}
static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
struct rockchip_pcie *rockchip = bus->sysdata;
if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (bus->number == rockchip->root_bus_nr)
return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, val);
}
static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
struct rockchip_pcie *rockchip = bus->sysdata;
if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
if (bus->number == rockchip->root_bus_nr)
return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, val);
}
static struct pci_ops rockchip_pcie_ops = {
.read = rockchip_pcie_rd_conf,
.write = rockchip_pcie_wr_conf,
};
/**
* rockchip_pcie_init_port - Initialize hardware
* @rockchip: PCIe port information
*/
static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
int err;
u32 status;
unsigned long timeout;
gpiod_set_value(rockchip->ep_gpio, 0);
err = phy_init(rockchip->phy);
if (err < 0) {
dev_err(dev, "fail to init phy, err %d\n", err);
return err;
}
err = reset_control_assert(rockchip->core_rst);
if (err) {
dev_err(dev, "assert core_rst err %d\n", err);
return err;
}
err = reset_control_assert(rockchip->mgmt_rst);
if (err) {
dev_err(dev, "assert mgmt_rst err %d\n", err);
return err;
}
err = reset_control_assert(rockchip->mgmt_sticky_rst);
if (err) {
dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
return err;
}
err = reset_control_assert(rockchip->pipe_rst);
if (err) {
dev_err(dev, "assert pipe_rst err %d\n", err);
return err;
}
rockchip_pcie_write(rockchip,
PCIE_CLIENT_CONF_ENABLE |
PCIE_CLIENT_LINK_TRAIN_ENABLE |
PCIE_CLIENT_ARI_ENABLE |
PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) |
PCIE_CLIENT_MODE_RC |
PCIE_CLIENT_GEN_SEL_2,
PCIE_CLIENT_CONFIG);
err = phy_power_on(rockchip->phy);
if (err) {
dev_err(dev, "fail to power on phy, err %d\n", err);
return err;
}
/*
* Please don't reorder the deassert sequence of the following
* four reset pins.
*/
err = reset_control_deassert(rockchip->mgmt_sticky_rst);
if (err) {
dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
return err;
}
err = reset_control_deassert(rockchip->core_rst);
if (err) {
dev_err(dev, "deassert core_rst err %d\n", err);
return err;
}
err = reset_control_deassert(rockchip->mgmt_rst);
if (err) {
dev_err(dev, "deassert mgmt_rst err %d\n", err);
return err;
}
err = reset_control_deassert(rockchip->pipe_rst);
if (err) {
dev_err(dev, "deassert pipe_rst err %d\n", err);
return err;
}
/*
* We need to read/write PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 before
* enabling ASPM. Otherwise L1PwrOnSc and L1PwrOnVal isn't
* reliable and enabling ASPM doesn't work. This is a controller
* bug we need to work around.
*/
status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
/* Fix the transmitted FTS count desired to exit from L0s. */
status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
(PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
/* Enable Gen1 training */
rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
PCIE_CLIENT_CONFIG);
gpiod_set_value(rockchip->ep_gpio, 1);
/* 500ms timeout value should be enough for Gen1/2 training */
timeout = jiffies + msecs_to_jiffies(500);
for (;;) {
status = rockchip_pcie_read(rockchip,
PCIE_CLIENT_BASIC_STATUS1);
if ((status & PCIE_CLIENT_LINK_STATUS_MASK) ==
PCIE_CLIENT_LINK_STATUS_UP) {
dev_dbg(dev, "PCIe link training gen1 pass!\n");
break;
}
if (time_after(jiffies, timeout)) {
dev_err(dev, "PCIe link training gen1 timeout!\n");
return -ETIMEDOUT;
}
msleep(20);
}
/*
* Enable retrain for gen2. This should be configured only after
* gen1 finished.
*/
status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
status |= PCIE_RC_CONFIG_LCS_RETRAIN_LINK;
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
timeout = jiffies + msecs_to_jiffies(500);
for (;;) {
status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) ==
PCIE_CORE_PL_CONF_SPEED_5G) {
dev_dbg(dev, "PCIe link training gen2 pass!\n");
break;
}
if (time_after(jiffies, timeout)) {
dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
break;
}
msleep(20);
}
/* Check the final link width from negotiated lane counter from MGMT */
status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
PCIE_CORE_PL_CONF_LANE_SHIFT);
dev_dbg(dev, "current link width is x%d\n", status);
rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
PCIE_RC_CONFIG_VENDOR);
rockchip_pcie_write(rockchip,
PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
PCIE_RC_CONFIG_RID_CCR);
rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
rockchip_pcie_write(rockchip,
(RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
PCIE_CORE_OB_REGION_ADDR0);
rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
PCIE_CORE_OB_REGION_ADDR1);
rockchip_pcie_write(rockchip, 0x0080000a, PCIE_CORE_OB_REGION_DESC0);
rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
return 0;
}
static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
{
struct rockchip_pcie *rockchip = arg;
struct device *dev = rockchip->dev;
u32 reg;
u32 sub_reg;
reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
if (reg & PCIE_CLIENT_INT_LOCAL) {
dev_dbg(dev, "local interrupt received\n");
sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
if (sub_reg & PCIE_CORE_INT_PRFPE)
dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
if (sub_reg & PCIE_CORE_INT_CRFPE)
dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
if (sub_reg & PCIE_CORE_INT_RRPE)
dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
if (sub_reg & PCIE_CORE_INT_PRFO)
dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
if (sub_reg & PCIE_CORE_INT_CRFO)
dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
if (sub_reg & PCIE_CORE_INT_RT)
dev_dbg(dev, "replay timer timed out\n");
if (sub_reg & PCIE_CORE_INT_RTR)
dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
if (sub_reg & PCIE_CORE_INT_PE)
dev_dbg(dev, "phy error detected on receive side\n");
if (sub_reg & PCIE_CORE_INT_MTR)
dev_dbg(dev, "malformed TLP received from the link\n");
if (sub_reg & PCIE_CORE_INT_UCR)
dev_dbg(dev, "malformed TLP received from the link\n");
if (sub_reg & PCIE_CORE_INT_FCE)
dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
if (sub_reg & PCIE_CORE_INT_CT)
dev_dbg(dev, "a request timed out waiting for completion\n");
if (sub_reg & PCIE_CORE_INT_UTC)
dev_dbg(dev, "unmapped TC error\n");
if (sub_reg & PCIE_CORE_INT_MMVC)
dev_dbg(dev, "MSI mask register changes\n");
rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
} else if (reg & PCIE_CLIENT_INT_PHY) {
dev_dbg(dev, "phy link changes\n");
rockchip_pcie_update_txcredit_mui(rockchip);
rockchip_pcie_clr_bw_int(rockchip);
}
rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
PCIE_CLIENT_INT_STATUS);
return IRQ_HANDLED;
}
static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
{
struct rockchip_pcie *rockchip = arg;
struct device *dev = rockchip->dev;
u32 reg;
reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
dev_dbg(dev, "legacy done interrupt received\n");
if (reg & PCIE_CLIENT_INT_MSG)
dev_dbg(dev, "message done interrupt received\n");
if (reg & PCIE_CLIENT_INT_HOT_RST)
dev_dbg(dev, "hot reset interrupt received\n");
if (reg & PCIE_CLIENT_INT_DPA)
dev_dbg(dev, "dpa interrupt received\n");
if (reg & PCIE_CLIENT_INT_FATAL_ERR)
dev_dbg(dev, "fatal error interrupt received\n");
if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
dev_dbg(dev, "no fatal error interrupt received\n");
if (reg & PCIE_CLIENT_INT_CORR_ERR)
dev_dbg(dev, "correctable error interrupt received\n");
if (reg & PCIE_CLIENT_INT_PHY)
dev_dbg(dev, "phy interrupt received\n");
rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
PCIE_CLIENT_INT_NFATAL_ERR |
PCIE_CLIENT_INT_CORR_ERR |
PCIE_CLIENT_INT_PHY),
PCIE_CLIENT_INT_STATUS);
return IRQ_HANDLED;
}
static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
struct device *dev = rockchip->dev;
u32 reg;
u32 hwirq;
u32 virq;
chained_irq_enter(chip, desc);
reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
while (reg) {
hwirq = ffs(reg) - 1;
reg &= ~BIT(hwirq);
virq = irq_find_mapping(rockchip->irq_domain, hwirq);
if (virq)
generic_handle_irq(virq);
else
dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
}
chained_irq_exit(chip, desc);
}
/**
* rockchip_pcie_parse_dt - Parse Device Tree
* @rockchip: PCIe port information
*
* Return: '0' on success and error value on failure
*/
static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
struct platform_device *pdev = to_platform_device(dev);
struct device_node *node = dev->of_node;
struct resource *regs;
int irq;
int err;
regs = platform_get_resource_byname(pdev,
IORESOURCE_MEM,
"axi-base");
rockchip->reg_base = devm_ioremap_resource(dev, regs);
if (IS_ERR(rockchip->reg_base))
return PTR_ERR(rockchip->reg_base);
regs = platform_get_resource_byname(pdev,
IORESOURCE_MEM,
"apb-base");
rockchip->apb_base = devm_ioremap_resource(dev, regs);
if (IS_ERR(rockchip->apb_base))
return PTR_ERR(rockchip->apb_base);
rockchip->phy = devm_phy_get(dev, "pcie-phy");
if (IS_ERR(rockchip->phy)) {
if (PTR_ERR(rockchip->phy) != -EPROBE_DEFER)
dev_err(dev, "missing phy\n");
return PTR_ERR(rockchip->phy);
}
rockchip->lanes = 1;
err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
if (!err && (rockchip->lanes == 0 ||
rockchip->lanes == 3 ||
rockchip->lanes > 4)) {
dev_warn(dev, "invalid num-lanes, default to use one lane\n");
rockchip->lanes = 1;
}
rockchip->core_rst = devm_reset_control_get(dev, "core");
if (IS_ERR(rockchip->core_rst)) {
if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
dev_err(dev, "missing core reset property in node\n");
return PTR_ERR(rockchip->core_rst);
}
rockchip->mgmt_rst = devm_reset_control_get(dev, "mgmt");
if (IS_ERR(rockchip->mgmt_rst)) {
if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
dev_err(dev, "missing mgmt reset property in node\n");
return PTR_ERR(rockchip->mgmt_rst);
}
rockchip->mgmt_sticky_rst = devm_reset_control_get(dev, "mgmt-sticky");
if (IS_ERR(rockchip->mgmt_sticky_rst)) {
if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
dev_err(dev, "missing mgmt-sticky reset property in node\n");
return PTR_ERR(rockchip->mgmt_sticky_rst);
}
rockchip->pipe_rst = devm_reset_control_get(dev, "pipe");
if (IS_ERR(rockchip->pipe_rst)) {
if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
dev_err(dev, "missing pipe reset property in node\n");
return PTR_ERR(rockchip->pipe_rst);
}
rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
if (IS_ERR(rockchip->ep_gpio)) {
dev_err(dev, "missing ep-gpios property in node\n");
return PTR_ERR(rockchip->ep_gpio);
}
rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
if (IS_ERR(rockchip->aclk_pcie)) {
dev_err(dev, "aclk clock not found\n");
return PTR_ERR(rockchip->aclk_pcie);
}
rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
if (IS_ERR(rockchip->aclk_perf_pcie)) {
dev_err(dev, "aclk_perf clock not found\n");
return PTR_ERR(rockchip->aclk_perf_pcie);
}
rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
if (IS_ERR(rockchip->hclk_pcie)) {
dev_err(dev, "hclk clock not found\n");
return PTR_ERR(rockchip->hclk_pcie);
}
rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
if (IS_ERR(rockchip->clk_pcie_pm)) {
dev_err(dev, "pm clock not found\n");
return PTR_ERR(rockchip->clk_pcie_pm);
}
irq = platform_get_irq_byname(pdev, "sys");
if (irq < 0) {
dev_err(dev, "missing sys IRQ resource\n");
return -EINVAL;
}
err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
IRQF_SHARED, "pcie-sys", rockchip);
if (err) {
dev_err(dev, "failed to request PCIe subsystem IRQ\n");
return err;
}
irq = platform_get_irq_byname(pdev, "legacy");
if (irq < 0) {
dev_err(dev, "missing legacy IRQ resource\n");
return -EINVAL;
}
irq_set_chained_handler_and_data(irq,
rockchip_pcie_legacy_int_handler,
rockchip);
irq = platform_get_irq_byname(pdev, "client");
if (irq < 0) {
dev_err(dev, "missing client IRQ resource\n");
return -EINVAL;
}
err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
IRQF_SHARED, "pcie-client", rockchip);
if (err) {
dev_err(dev, "failed to request PCIe client IRQ\n");
return err;
}
rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
if (IS_ERR(rockchip->vpcie3v3)) {
if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_info(dev, "no vpcie3v3 regulator found\n");
}
rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
if (IS_ERR(rockchip->vpcie1v8)) {
if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_info(dev, "no vpcie1v8 regulator found\n");
}
rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
if (IS_ERR(rockchip->vpcie0v9)) {
if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_info(dev, "no vpcie0v9 regulator found\n");
}
return 0;
}
static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
int err;
if (!IS_ERR(rockchip->vpcie3v3)) {
err = regulator_enable(rockchip->vpcie3v3);
if (err) {
dev_err(dev, "fail to enable vpcie3v3 regulator\n");
goto err_out;
}
}
if (!IS_ERR(rockchip->vpcie1v8)) {
err = regulator_enable(rockchip->vpcie1v8);
if (err) {
dev_err(dev, "fail to enable vpcie1v8 regulator\n");
goto err_disable_3v3;
}
}
if (!IS_ERR(rockchip->vpcie0v9)) {
err = regulator_enable(rockchip->vpcie0v9);
if (err) {
dev_err(dev, "fail to enable vpcie0v9 regulator\n");
goto err_disable_1v8;
}
}
return 0;
err_disable_1v8:
if (!IS_ERR(rockchip->vpcie1v8))
regulator_disable(rockchip->vpcie1v8);
err_disable_3v3:
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
err_out:
return err;
}
static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
{
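/*
* PCIE_CLIENT_INT_MASK appears to follow the same upper-half write-mask
* convention as PCIE_CLIENT_CONFIG: setting the write-enable bits while
* leaving the low half clear unmasks the CLI interrupts.
*/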
rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
(~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
PCIE_CORE_INT_MASK);
rockchip_pcie_enable_bw_int(rockchip);
}
static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
return 0;
}
static const struct irq_domain_ops intx_domain_ops = {
.map = rockchip_pcie_intx_map,
};
static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
struct device_node *intc = of_get_next_child(dev->of_node, NULL);
if (!intc) {
dev_err(dev, "missing child interrupt-controller node\n");
return -EINVAL;
}
rockchip->irq_domain = irq_domain_add_linear(intc, 4,
&intx_domain_ops, rockchip);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
return -EINVAL;
}
return 0;
}
static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
int region_no, int type, u8 num_pass_bits,
u32 lower_addr, u32 upper_addr)
{
u32 ob_addr_0;
u32 ob_addr_1;
u32 ob_desc_0;
u32 aw_offset;
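/*
* Outbound region 0 is 32 MB and is programmed in rockchip_pcie_init_port()
* for configuration accesses; regions 1..32 each cover 1 MB of AXI space.
*/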
if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
return -EINVAL;
if (num_pass_bits + 1 < 8)
return -EINVAL;
if (num_pass_bits > 63)
return -EINVAL;
if (region_no == 0) {
if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
return -EINVAL;
}
if (region_no != 0) {
if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
return -EINVAL;
}
aw_offset = (region_no << OB_REG_SIZE_SHIFT);
ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
ob_addr_1 = upper_addr;
ob_desc_0 = (1 << 23 | type);
rockchip_pcie_write(rockchip, ob_addr_0,
PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
rockchip_pcie_write(rockchip, ob_addr_1,
PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
rockchip_pcie_write(rockchip, ob_desc_0,
PCIE_CORE_OB_REGION_DESC0 + aw_offset);
rockchip_pcie_write(rockchip, 0,
PCIE_CORE_OB_REGION_DESC1 + aw_offset);
return 0;
}
static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
int region_no, u8 num_pass_bits,
u32 lower_addr, u32 upper_addr)
{
u32 ib_addr_0;
u32 ib_addr_1;
u32 aw_offset;
if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
return -EINVAL;
if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
return -EINVAL;
if (num_pass_bits > 63)
return -EINVAL;
aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
ib_addr_1 = upper_addr;
rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
return 0;
}
static int rockchip_pcie_probe(struct platform_device *pdev)
{
struct rockchip_pcie *rockchip;
struct device *dev = &pdev->dev;
struct pci_bus *bus, *child;
struct resource_entry *win;
resource_size_t io_base;
struct resource *mem;
struct resource *io;
phys_addr_t io_bus_addr = 0;
u32 io_size;
phys_addr_t mem_bus_addr = 0;
u32 mem_size = 0;
int reg_no;
int err;
int offset;
LIST_HEAD(res);
if (!dev->of_node)
return -ENODEV;
rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
if (!rockchip)
return -ENOMEM;
rockchip->dev = dev;
err = rockchip_pcie_parse_dt(rockchip);
if (err)
return err;
err = clk_prepare_enable(rockchip->aclk_pcie);
if (err) {
dev_err(dev, "unable to enable aclk_pcie clock\n");
goto err_aclk_pcie;
}
err = clk_prepare_enable(rockchip->aclk_perf_pcie);
if (err) {
dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
goto err_aclk_perf_pcie;
}
err = clk_prepare_enable(rockchip->hclk_pcie);
if (err) {
dev_err(dev, "unable to enable hclk_pcie clock\n");
goto err_hclk_pcie;
}
err = clk_prepare_enable(rockchip->clk_pcie_pm);
if (err) {
dev_err(dev, "unable to enable hclk_pcie clock\n");
goto err_pcie_pm;
}
err = rockchip_pcie_set_vpcie(rockchip);
if (err) {
dev_err(dev, "failed to set vpcie regulator\n");
goto err_set_vpcie;
}
err = rockchip_pcie_init_port(rockchip);
if (err)
goto err_vpcie;
platform_set_drvdata(pdev, rockchip);
rockchip_pcie_enable_interrupts(rockchip);
err = rockchip_pcie_init_irq_domain(rockchip);
if (err < 0)
goto err_vpcie;
err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
&res, &io_base);
if (err)
goto err_vpcie;
err = devm_request_pci_bus_resources(dev, &res);
if (err)
goto err_vpcie;
/* Get the I/O and memory ranges from DT */
io_size = 0;
resource_list_for_each_entry(win, &res) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
io = win->res;
io->name = "I/O";
io_size = resource_size(io);
io_bus_addr = io->start - win->offset;
err = pci_remap_iospace(io, io_base);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, io);
continue;
}
break;
case IORESOURCE_MEM:
mem = win->res;
mem->name = "MEM";
mem_size = resource_size(mem);
mem_bus_addr = mem->start - win->offset;
break;
case IORESOURCE_BUS:
rockchip->root_bus_nr = win->res->start;
break;
default:
continue;
}
}
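/*
* Map the MEM window through consecutive 1 MB outbound regions, starting
* at region 1; region 0 stays reserved for configuration cycles.
*/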
if (mem_size) {
for (reg_no = 0; reg_no < (mem_size >> 20); reg_no++) {
err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
AXI_WRAPPER_MEM_WRITE,
20 - 1,
mem_bus_addr +
(reg_no << 20),
0);
if (err) {
dev_err(dev, "program RC mem outbound ATU failed\n");
goto err_vpcie;
}
}
}
err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
if (err) {
dev_err(dev, "program RC mem inbound ATU failed\n");
goto err_vpcie;
}
offset = mem_size >> 20;
if (io_size) {
for (reg_no = 0; reg_no < (io_size >> 20); reg_no++) {
err = rockchip_pcie_prog_ob_atu(rockchip,
reg_no + 1 + offset,
AXI_WRAPPER_IO_WRITE,
20 - 1,
io_bus_addr +
(reg_no << 20),
0);
if (err) {
dev_err(dev, "program RC io outbound ATU failed\n");
goto err_vpcie;
}
}
}
bus = pci_scan_root_bus(&pdev->dev, 0, &rockchip_pcie_ops, rockchip, &res);
if (!bus) {
err = -ENOMEM;
goto err_vpcie;
}
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
return err;
err_vpcie:
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
if (!IS_ERR(rockchip->vpcie1v8))
regulator_disable(rockchip->vpcie1v8);
if (!IS_ERR(rockchip->vpcie0v9))
regulator_disable(rockchip->vpcie0v9);
err_set_vpcie:
clk_disable_unprepare(rockchip->clk_pcie_pm);
err_pcie_pm:
clk_disable_unprepare(rockchip->hclk_pcie);
err_hclk_pcie:
clk_disable_unprepare(rockchip->aclk_perf_pcie);
err_aclk_perf_pcie:
clk_disable_unprepare(rockchip->aclk_pcie);
err_aclk_pcie:
return err;
}
static const struct of_device_id rockchip_pcie_of_match[] = {
{ .compatible = "rockchip,rk3399-pcie", },
{}
};
static struct platform_driver rockchip_pcie_driver = {
.driver = {
.name = "rockchip-pcie",
.of_match_table = rockchip_pcie_of_match,
},
.probe = rockchip_pcie_probe,
};
builtin_platform_driver(rockchip_pcie_driver);
...@@ -85,10 +85,15 @@
#define MSGF_MISC_SR_MASTER_ERR BIT(5)
#define MSGF_MISC_SR_I_ADDR_ERR BIT(6)
#define MSGF_MISC_SR_E_ADDR_ERR BIT(7)
#define MSGF_MISC_SR_FATAL_AER BIT(16)
#define MSGF_MISC_SR_NON_FATAL_AER BIT(17)
#define MSGF_MISC_SR_CORR_AER BIT(18)
#define MSGF_MISC_SR_UR_DETECT BIT(20)
#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22)
#define MSGF_MISC_SR_FATAL_DEV BIT(23)
#define MSGF_MISC_SR_LINK_DOWN BIT(24)
#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25)
#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26)
#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
MSGF_MISC_SR_RXMSG_OVER | \
...@@ -96,9 +101,15 @@
MSGF_MISC_SR_MASTER_ERR | \
MSGF_MISC_SR_I_ADDR_ERR | \
MSGF_MISC_SR_E_ADDR_ERR | \
MSGF_MISC_SR_FATAL_AER | \
MSGF_MISC_SR_NON_FATAL_AER | \
MSGF_MISC_SR_CORR_AER | \
MSGF_MISC_SR_UR_DETECT | \
MSGF_MISC_SR_NON_FATAL_DEV | \
MSGF_MISC_SR_FATAL_DEV | \
MSGF_MISC_SR_LINK_DOWN | \
MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
MSGF_MSIC_SR_LINK_BWIDTH)
/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA BIT(0)
...@@ -109,8 +120,8 @@
MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD)
/* MSI interrupt status mask bits */
#define MSGF_MSI_SR_LO_MASK GENMASK(31, 0)
#define MSGF_MSI_SR_HI_MASK GENMASK(31, 0)
#define MSII_PRESENT BIT(0)
#define MSII_ENABLE BIT(0)
...@@ -291,8 +302,29 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
dev_err(pcie->dev,
"In Misc Egress address translation error\n");
if (misc_stat & MSGF_MISC_SR_FATAL_AER)
dev_err(pcie->dev, "Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
dev_err(pcie->dev, "Non-Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_CORR_AER)
dev_err(pcie->dev, "Correctable Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_UR_DETECT)
dev_err(pcie->dev, "Unsupported request Detected\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
dev_err(pcie->dev, "Non-Fatal Error Detected\n");
if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
dev_err(pcie->dev, "Fatal Error Detected\n");
if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
dev_info(pcie->dev, "Link Autonomous Bandwidth Management Status bit set\n");
if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
dev_info(pcie->dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */
nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);
......
...@@ -101,7 +101,8 @@ ...@@ -101,7 +101,8 @@
* @msi_pages: MSI pages * @msi_pages: MSI pages
* @root_busno: Root Bus number * @root_busno: Root Bus number
* @dev: Device pointer * @dev: Device pointer
* @irq_domain: IRQ domain pointer * @msi_domain: MSI IRQ domain pointer
* @leg_domain: Legacy IRQ domain pointer
* @resources: Bus Resources * @resources: Bus Resources
*/ */
struct xilinx_pcie_port { struct xilinx_pcie_port {
...@@ -110,7 +111,8 @@ struct xilinx_pcie_port { ...@@ -110,7 +111,8 @@ struct xilinx_pcie_port {
unsigned long msi_pages; unsigned long msi_pages;
u8 root_busno; u8 root_busno;
struct device *dev; struct device *dev;
struct irq_domain *irq_domain; struct irq_domain *msi_domain;
struct irq_domain *leg_domain;
struct list_head resources; struct list_head resources;
}; };
@@ -212,13 +214,15 @@ static void xilinx_pcie_destroy_msi(unsigned int irq)
 {
 	struct msi_desc *msi;
 	struct xilinx_pcie_port *port;
+	struct irq_data *d = irq_get_irq_data(irq);
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-	if (!test_bit(irq, msi_irq_in_use)) {
+	if (!test_bit(hwirq, msi_irq_in_use)) {
 		msi = irq_get_msi_desc(irq);
 		port = msi_desc_to_pci_sysdata(msi);
 		dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
 	} else {
-		clear_bit(irq, msi_irq_in_use);
+		clear_bit(hwirq, msi_irq_in_use);
 	}
 }
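The msi_irq_in_use bitmap is indexed by hardware MSI number, while this function receives a Linux virq, so the patch translates with irq_get_irq_data()/irqd_to_hwirq() before testing and clearing the bit. A stand-alone sketch of that translation:

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Stand-alone illustration of the virq -> hwirq step used above. */
static irq_hw_number_t demo_virq_to_hwirq(unsigned int virq)
{
	struct irq_data *d = irq_get_irq_data(virq);

	/* 0 doubles as a "no mapping" fallback in this sketch only. */
	return d ? irqd_to_hwirq(d) : 0;
}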
@@ -250,6 +254,7 @@ static void xilinx_msi_teardown_irq(struct msi_controller *chip,
 				    unsigned int irq)
 {
 	xilinx_pcie_destroy_msi(irq);
+	irq_dispose_mapping(irq);
 }
 
 /**
@@ -274,7 +279,7 @@ static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
 	if (hwirq < 0)
 		return hwirq;
 
-	irq = irq_create_mapping(port->irq_domain, hwirq);
+	irq = irq_create_mapping(port->msi_domain, hwirq);
 	if (!irq)
 		return -EINVAL;
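Setup pairs a free hardware MSI number with a virq created in the MSI domain; the teardown hunk above now undoes the mapping with irq_dispose_mapping(). A minimal sketch of that allocate-then-map pattern, assuming a hypothetical demo_msi_in_use bitmap rather than the driver's own:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/irqdomain.h>

/* demo_msi_in_use is a stand-in for the driver's in-use tracking. */
static DECLARE_BITMAP(demo_msi_in_use, XILINX_NUM_MSI_IRQS);

static int demo_alloc_msi(struct xilinx_pcie_port *port)
{
	int hwirq, virq;

	hwirq = find_first_zero_bit(demo_msi_in_use, XILINX_NUM_MSI_IRQS);
	if (hwirq >= XILINX_NUM_MSI_IRQS)
		return -ENOSPC;
	set_bit(hwirq, demo_msi_in_use);

	virq = irq_create_mapping(port->msi_domain, hwirq);
	if (!virq) {
		clear_bit(hwirq, demo_msi_in_use);
		return -EINVAL;
	}

	/* The teardown path reverses this with irq_dispose_mapping(virq). */
	return virq;
}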
@@ -425,7 +430,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 		/* Check whether interrupt valid */
 		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
 			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
-			return IRQ_HANDLED;
+			goto error;
 		}
 
 		if (!(val & XILINX_PCIE_RPIFR1_MSI_INTR)) {
@@ -436,7 +441,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 			/* Handle INTx Interrupt */
 			val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
 				XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
-			generic_handle_irq(irq_find_mapping(port->irq_domain,
+			generic_handle_irq(irq_find_mapping(port->leg_domain,
 							    val));
 		}
 	}
@@ -447,7 +452,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
 			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
-			return IRQ_HANDLED;
+			goto error;
 		}
 
 		if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
@@ -492,6 +497,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 	if (status & XILINX_PCIE_INTR_MST_ERRP)
 		dev_warn(port->dev, "Master error poison\n");
 
+error:
 	/* Clear the Interrupt Decode register */
 	pcie_write(port, status, XILINX_PCIE_REG_IDR);
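Replacing the early returns with "goto error" guarantees the Interrupt Decode register is still written back to clear the latched sources even when the FIFO read turns out to be invalid, so a stuck source cannot re-trigger forever. The same shape in a generic, hypothetical ISR (register offset and valid bit are placeholders, not this driver's):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define DEMO_REG_IDR	0x138		/* hypothetical decode register */
#define DEMO_INTR_VALID	BIT(31)		/* hypothetical "entry valid" bit */

static irqreturn_t demo_isr(int irq, void *data)
{
	void __iomem *regs = data;
	u32 status = readl(regs + DEMO_REG_IDR);

	if (!(status & DEMO_INTR_VALID))
		goto out;

	/* ... decode and dispatch the valid interrupt sources here ... */

out:
	/* Always write the latched sources back to clear them. */
	writel(status, regs + DEMO_REG_IDR);
	return IRQ_HANDLED;
}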
@@ -517,21 +523,21 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
 		return -ENODEV;
 	}
 
-	port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
+	port->leg_domain = irq_domain_add_linear(pcie_intc_node, 4,
 						 &intx_domain_ops,
 						 port);
-	if (!port->irq_domain) {
+	if (!port->leg_domain) {
 		dev_err(dev, "Failed to get a INTx IRQ domain\n");
 		return -ENODEV;
 	}
 
 	/* Setup MSI */
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		port->irq_domain = irq_domain_add_linear(node,
+		port->msi_domain = irq_domain_add_linear(node,
 							 XILINX_NUM_MSI_IRQS,
 							 &msi_domain_ops,
 							 &xilinx_pcie_msi_chip);
-		if (!port->irq_domain) {
+		if (!port->msi_domain) {
 			dev_err(dev, "Failed to get a MSI IRQ domain\n");
 			return -ENODEV;
 		}
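Both domains are created with irq_domain_add_linear(); what makes such a domain usable is the ops structure passed in, whose .map callback wires each virq to a chip and flow handler the first time a mapping is created or looked up. A generic example of what INTx ops commonly look like (names here are hypothetical, not necessarily this driver's intx_domain_ops):

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Generic .map callback for a linear INTx domain, illustration only. */
static int demo_intx_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops demo_intx_ops = {
	.map = demo_intx_map,
};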
...