Skip to content

Commit 8babd8a

Browse files
mpillai-cadence and bjorn-helgaas
authored and committed
PCI: cadence: Add support for High Perf Architecture (HPA) controller
Add support for Cadence PCIe RP configuration for High Performance Architecture (HPA) controllers. The Cadence High Performance controllers are the latest PCIe controllers that have support for DMA, optional IDE and updated register set. Add a common library for High Performance Architecture (HPA) PCIe controllers. Signed-off-by: Manikandan K Pillai <mpillai@cadence.com> Signed-off-by: Manivannan Sadhasivam <mani@kernel.org> [bhelgaas: squash https://lore.kernel.org/r/20251120093518.2760492-1-jiapeng.chong@linux.alibaba.com, squash https://lore.kernel.org/all/52abaad8-a43e-4e29-93d7-86a3245692c3@cixtech.com/] Signed-off-by: Bjorn Helgaas <bhelgaas@google.com> Link: https://patch.msgid.link/20251108140305.1120117-5-hans.zhang@cixtech.com
1 parent b80a7b4 commit 8babd8a

7 files changed

Lines changed: 913 additions & 21 deletions

File tree

drivers/pci/controller/cadence/Makefile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# SPDX-License-Identifier: GPL-2.0
2-
pcie-cadence-mod-y := pcie-cadence.o
3-
pcie-cadence-host-mod-y := pcie-cadence-host-common.o pcie-cadence-host.o
2+
pcie-cadence-mod-y := pcie-cadence-hpa.o pcie-cadence.o
3+
pcie-cadence-host-mod-y := pcie-cadence-host-common.o pcie-cadence-host.o pcie-cadence-host-hpa.o
44
pcie-cadence-ep-mod-y := pcie-cadence-ep.o
55

66
obj-$(CONFIG_PCIE_CADENCE) = pcie-cadence-mod.o
Lines changed: 368 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,368 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/*
3+
* Cadence PCIe host controller driver.
4+
*
5+
* Copyright (c) 2024, Cadence Design Systems
6+
* Author: Manikandan K Pillai <mpillai@cadence.com>
7+
*/
8+
#include <linux/delay.h>
9+
#include <linux/kernel.h>
10+
#include <linux/list_sort.h>
11+
#include <linux/of_address.h>
12+
#include <linux/of_pci.h>
13+
#include <linux/of_irq.h>
14+
#include <linux/platform_device.h>
15+
16+
#include "pcie-cadence.h"
17+
#include "pcie-cadence-host-common.h"
18+
19+
static u8 bar_aperture_mask[] = {
20+
[RP_BAR0] = 0x3F,
21+
[RP_BAR1] = 0x3F,
22+
};
23+
24+
/*
 * Map a config-space access on @bus/@devfn to a CPU virtual address.
 *
 * Accesses aimed at the root port itself are served straight from the
 * local register space.  Anything further out is funnelled through
 * outbound AXI region 0, which is re-targeted here for the requested
 * bus/devfn before the generic accessors dereference rc->cfg_base.
 */
void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn,
				   int where)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
	struct cdns_pcie *pcie = &rc->pcie;
	unsigned int bus_num = bus->number;
	u32 addr0, desc0, desc1, ctrl0, val;

	if (pci_is_root_bus(bus)) {
		/*
		 * Only the root port (devfn == 0) is connected to this bus.
		 * All other PCI devices are behind some bridge hence on
		 * another bus.
		 */
		if (devfn)
			return NULL;

		return pcie->reg_base + (where & 0xfff);
	}

	/* Clear AXI link-down status */
	val = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE,
				  CDNS_PCIE_HPA_AT_LINKDOWN);
	cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN,
			     val & ~GENMASK(0, 0));

	/* Re-target outbound AXI region 0 at the requested function */
	addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
		CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
		CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS(bus_num);
	cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
			     CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(0), addr0);

	desc1 = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE,
				    CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0));
	desc1 &= ~CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK;
	desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
	ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
		CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;

	/* The directly attached bus gets Type 0 accesses, deeper buses Type 1 */
	if (bus_num == bridge->busnr + 1)
		desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
	else
		desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;

	cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
			     CDNS_PCIE_HPA_AT_OB_REGION_DESC0(0), desc0);
	cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
			     CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1);
	cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
			     CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(0), ctrl0);

	return rc->cfg_base + (where & 0xfff);
}
79+
80+
static struct pci_ops cdns_pcie_hpa_host_ops = {
81+
.map_bus = cdns_pci_hpa_map_bus,
82+
.read = pci_generic_config_read,
83+
.write = pci_generic_config_write,
84+
};
85+
86+
static void cdns_pcie_hpa_host_enable_ptm_response(struct cdns_pcie *pcie)
87+
{
88+
u32 val;
89+
90+
val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL);
91+
cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL,
92+
val | CDNS_PCIE_HPA_LM_PTM_CTRL_PTMRSEN);
93+
}
94+
95+
/*
 * Program one inbound root-port BAR so that @size bytes at @cpu_addr are
 * reachable from the PCIe side.  @flags carries the IORESOURCE_* bits of
 * the dma-range being mapped.  Returns -ENODEV once @bar has already
 * been claimed.
 */
static int cdns_pcie_hpa_host_bar_ib_config(struct cdns_pcie_rc *rc,
					    enum cdns_pcie_rp_bar bar,
					    u64 cpu_addr, u64 size,
					    unsigned long flags)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 addr0, addr1, nbits, cfg;

	if (!rc->avail_ib_bar[bar])
		return -ENODEV;

	rc->avail_ib_bar[bar] = false;

	nbits = ilog2(size);
	if (bar == RP_NO_BAR)
		addr0 = CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS(nbits) |
			(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	else
		addr0 = lower_32_bits(cpu_addr);
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER,
			     CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0(bar), addr0);
	cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER,
			     CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR1(bar), addr1);

	/* RP_NO_BAR shares the BAR_0 slot in the BAR config register */
	if (bar == RP_NO_BAR)
		bar = (enum cdns_pcie_rp_bar)BAR_0;

	cfg = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_CFG_CTRL_REG,
				  CDNS_PCIE_HPA_LM_RC_BAR_CFG);
	cfg &= ~(HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
		 HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
		 HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
		 HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
		 HPA_LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 7));

	/* A window reaching past 4 GiB needs a 64-bit BAR */
	if (cpu_addr + size >= SZ_4G) {
		cfg |= HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
		if (flags & IORESOURCE_PREFETCH)
			cfg |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
	} else {
		cfg |= HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
		if (flags & IORESOURCE_PREFETCH)
			cfg |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
	}

	cfg |= HPA_LM_RC_BAR_CFG_APERTURE(bar, nbits);
	cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG,
			     CDNS_PCIE_HPA_LM_RC_BAR_CFG, cfg);

	return 0;
}
146+
147+
static int cdns_pcie_hpa_host_init_root_port(struct cdns_pcie_rc *rc)
148+
{
149+
struct cdns_pcie *pcie = &rc->pcie;
150+
u32 value, ctrl;
151+
152+
/*
153+
* Set the root port BAR configuration register:
154+
* - disable both BAR0 and BAR1
155+
* - enable Prefetchable Memory Base and Limit registers in type 1
156+
* config space (64 bits)
157+
* - enable IO Base and Limit registers in type 1 config
158+
* space (32 bits)
159+
*/
160+
161+
ctrl = CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_DISABLED;
162+
value = CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
163+
CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
164+
CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
165+
CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
166+
CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_ENABLE |
167+
CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_32BITS;
168+
cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG,
169+
CDNS_PCIE_HPA_LM_RC_BAR_CFG, value);
170+
171+
if (rc->vendor_id != 0xffff)
172+
cdns_pcie_hpa_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
173+
174+
if (rc->device_id != 0xffff)
175+
cdns_pcie_hpa_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
176+
177+
cdns_pcie_hpa_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
178+
cdns_pcie_hpa_rp_writeb(pcie, PCI_CLASS_PROG, 0);
179+
cdns_pcie_hpa_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
180+
181+
/* Enable bus mastering */
182+
value = cdns_pcie_hpa_readl(pcie, REG_BANK_RP, PCI_COMMAND);
183+
value |= (PCI_COMMAND_MEMORY | PCI_COMMAND_IO | PCI_COMMAND_MASTER);
184+
cdns_pcie_hpa_writel(pcie, REG_BANK_RP, PCI_COMMAND, value);
185+
return 0;
186+
}
187+
188+
static void cdns_pcie_hpa_create_region_for_cfg(struct cdns_pcie_rc *rc)
189+
{
190+
struct cdns_pcie *pcie = &rc->pcie;
191+
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
192+
struct resource *cfg_res = rc->cfg_res;
193+
struct resource_entry *entry;
194+
u64 cpu_addr = cfg_res->start;
195+
u32 addr0, addr1, desc1;
196+
int busnr = 0;
197+
198+
entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
199+
if (entry)
200+
busnr = entry->res->start;
201+
202+
cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
203+
CDNS_PCIE_HPA_TAG_MANAGEMENT, 0x01000000);
204+
/*
205+
* Reserve region 0 for PCI configure space accesses:
206+
* OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
207+
* cdns_pci_map_bus(), other region registers are set here once for all
208+
*/
209+
desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr);
210+
cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
211+
CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(0), 0x0);
212+
/* Type-1 CFG */
213+
cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
214+
CDNS_PCIE_HPA_AT_OB_REGION_DESC0(0), 0x05000000);
215+
cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
216+
CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1);
217+
218+
addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
219+
(lower_32_bits(cpu_addr) & GENMASK(31, 8));
220+
addr1 = upper_32_bits(cpu_addr);
221+
cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
222+
CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(0), addr0);
223+
cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
224+
CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(0), addr1);
225+
cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
226+
CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(0), 0x06000000);
227+
}
228+
229+
static int cdns_pcie_hpa_host_init_address_translation(struct cdns_pcie_rc *rc)
230+
{
231+
struct cdns_pcie *pcie = &rc->pcie;
232+
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
233+
struct resource_entry *entry;
234+
int r = 0, busnr = 0;
235+
236+
if (!rc->ecam_supported)
237+
cdns_pcie_hpa_create_region_for_cfg(rc);
238+
239+
entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
240+
if (entry)
241+
busnr = entry->res->start;
242+
243+
r++;
244+
if (pcie->msg_res) {
245+
cdns_pcie_hpa_set_outbound_region_for_normal_msg(pcie, busnr, 0, r,
246+
pcie->msg_res->start);
247+
248+
r++;
249+
}
250+
resource_list_for_each_entry(entry, &bridge->windows) {
251+
struct resource *res = entry->res;
252+
u64 pci_addr = res->start - entry->offset;
253+
254+
if (resource_type(res) == IORESOURCE_IO)
255+
cdns_pcie_hpa_set_outbound_region(pcie, busnr, 0, r,
256+
true,
257+
pci_pio_to_address(res->start),
258+
pci_addr,
259+
resource_size(res));
260+
else
261+
cdns_pcie_hpa_set_outbound_region(pcie, busnr, 0, r,
262+
false,
263+
res->start,
264+
pci_addr,
265+
resource_size(res));
266+
267+
r++;
268+
}
269+
270+
if (rc->no_inbound_map)
271+
return 0;
272+
else
273+
return cdns_pcie_host_map_dma_ranges(rc, cdns_pcie_hpa_host_bar_ib_config);
274+
}
275+
276+
/* Bring up the root port, then program the address translation */
static int cdns_pcie_hpa_host_init(struct cdns_pcie_rc *rc)
{
	int ret = cdns_pcie_hpa_host_init_root_port(rc);

	if (ret)
		return ret;

	return cdns_pcie_hpa_host_init_address_translation(rc);
}
286+
287+
int cdns_pcie_hpa_host_link_setup(struct cdns_pcie_rc *rc)
288+
{
289+
struct cdns_pcie *pcie = &rc->pcie;
290+
struct device *dev = rc->pcie.dev;
291+
int ret;
292+
293+
if (rc->quirk_detect_quiet_flag)
294+
cdns_pcie_hpa_detect_quiet_min_delay_set(&rc->pcie);
295+
296+
cdns_pcie_hpa_host_enable_ptm_response(pcie);
297+
298+
ret = cdns_pcie_start_link(pcie);
299+
if (ret) {
300+
dev_err(dev, "Failed to start link\n");
301+
return ret;
302+
}
303+
304+
ret = cdns_pcie_host_wait_for_link(pcie, cdns_pcie_hpa_link_up);
305+
if (ret)
306+
dev_dbg(dev, "PCIe link never came up\n");
307+
308+
return ret;
309+
}
310+
EXPORT_SYMBOL_GPL(cdns_pcie_hpa_host_link_setup);
311+
312+
int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc)
313+
{
314+
struct device *dev = rc->pcie.dev;
315+
struct platform_device *pdev = to_platform_device(dev);
316+
struct pci_host_bridge *bridge;
317+
enum cdns_pcie_rp_bar bar;
318+
struct cdns_pcie *pcie;
319+
struct resource *res;
320+
int ret;
321+
322+
bridge = pci_host_bridge_from_priv(rc);
323+
if (!bridge)
324+
return -ENOMEM;
325+
326+
pcie = &rc->pcie;
327+
pcie->is_rc = true;
328+
329+
if (!pcie->reg_base) {
330+
pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
331+
if (IS_ERR(pcie->reg_base)) {
332+
dev_err(dev, "missing \"reg\"\n");
333+
return PTR_ERR(pcie->reg_base);
334+
}
335+
}
336+
337+
/* ECAM config space is remapped at glue layer */
338+
if (!rc->cfg_base) {
339+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
340+
rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
341+
if (IS_ERR(rc->cfg_base))
342+
return PTR_ERR(rc->cfg_base);
343+
rc->cfg_res = res;
344+
}
345+
346+
/* Put EROM Bar aperture to 0 */
347+
cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_EROM, 0x0);
348+
349+
ret = cdns_pcie_hpa_host_link_setup(rc);
350+
if (ret)
351+
return ret;
352+
353+
for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
354+
rc->avail_ib_bar[bar] = true;
355+
356+
ret = cdns_pcie_hpa_host_init(rc);
357+
if (ret)
358+
return ret;
359+
360+
if (!bridge->ops)
361+
bridge->ops = &cdns_pcie_hpa_host_ops;
362+
363+
return pci_host_probe(bridge);
364+
}
365+
EXPORT_SYMBOL_GPL(cdns_pcie_hpa_host_setup);
366+
367+
MODULE_LICENSE("GPL");
368+
MODULE_DESCRIPTION("Cadence PCIe host controller driver");

0 commit comments

Comments
 (0)