// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/delay.h>
#include <linux/of.h>
#include <linux/types.h>

#include "pcie-designware.h"

/* PCIe Port Logic registers */
#define PLR_OFFSET			0x700
#define PCIE_PHY_DEBUG_R1		(PLR_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP	(0x1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING	(0x1 << 29)

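/*
 * Read a naturally aligned 1-, 2- or 4-byte quantity from a memory-mapped
 * register.  A misaligned address or unsupported size returns
 * PCIBIOS_BAD_REGISTER_NUMBER with *val cleared to 0.
 */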
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

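/*
 * Write a naturally aligned 1-, 2- or 4-byte quantity to a memory-mapped
 * register.  A misaligned address or unsupported size returns
 * PCIBIOS_BAD_REGISTER_NUMBER.
 */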
int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

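/*
 * Read from the DBI register space, deferring to the platform's read_dbi
 * callback when one is provided.
 */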
u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
		       size_t size)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, base, reg, size);

	ret = dw_pcie_read(base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "Read DBI address failed\n");

	return val;
}

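/*
 * Write to the DBI register space, deferring to the platform's write_dbi
 * callback when one is provided.
 */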
void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
			 size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI address failed\n");
}

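/*
 * Accessors for the per-region outbound iATU registers used when the
 * controller exposes the "unrolled" iATU layout instead of the viewport.
 */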
static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_dbi(pci, offset + reg);
}

static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	dw_pcie_writel_dbi(pci, offset + reg, val);
}

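/*
 * Program and enable one outbound iATU region using the unrolled register
 * layout, then poll until the enable bit reads back as set.
 */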
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
					     int type, u64 cpu_addr,
					     u64 pci_addr, u32 size)
{
	u32 retries, val;

	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
				 lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type);
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

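/*
 * Program an outbound iATU region so that CPU accesses in
 * [cpu_addr, cpu_addr + size) are translated to PCI bus addresses starting
 * at pci_addr, applying the platform's cpu_addr_fixup callback first and
 * using the unrolled register layout when iatu_unroll_enabled is set.
 *
 * Illustrative call only (index and addresses are made-up example values,
 * not taken from this file):
 *
 *	dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_MEM,
 *				  0x40000000, 0x0, 0x100000);
 */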
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			       u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	if (pci->iatu_unroll_enabled) {
		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
						 pci_addr, size);
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

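/*
 * Accessors for the per-region inbound iATU registers (unrolled layout).
 */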
static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_dbi(pci, offset + reg);
}

static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	dw_pcie_writel_dbi(pci, offset + reg, val);
}

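/*
 * Program and enable one inbound iATU region in BAR-match mode using the
 * unrolled register layout, then poll until the enable bit reads back as
 * set.
 */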
static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
					   int bar, u64 cpu_addr,
					   enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE |
				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ib_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

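/*
 * Program an inbound iATU region in BAR-match mode so that accesses hitting
 * the given BAR are translated to cpu_addr.  Returns 0 on success, -EINVAL
 * for an unknown address-space type and -EBUSY if the region never enables.
 */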
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
			     u64 cpu_addr, enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	if (pci->iatu_unroll_enabled)
		return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
						       cpu_addr, as_type);

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
			   index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
			   | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

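/*
 * Disable a previously programmed inbound or outbound iATU region via the
 * viewport registers.
 */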
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
			 enum dw_pcie_region_type type)
{
	int region;

	switch (type) {
	case DW_PCIE_REGION_INBOUND:
		region = PCIE_ATU_REGION_INBOUND;
		break;
	case DW_PCIE_REGION_OUTBOUND:
		region = PCIE_ATU_REGION_OUTBOUND;
		break;
	default:
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE);
}

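/*
 * Poll for link-up, sleeping between attempts.  Returns 0 once the link is
 * reported up, or -ETIMEDOUT after LINK_WAIT_MAX_RETRIES attempts.
 */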
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pci)) {
			dev_info(pci->dev, "Link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(pci->dev, "Phy link never came up\n");

	return -ETIMEDOUT;
}

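/*
 * Report whether the link is up, preferring the platform's link_up callback
 * and otherwise checking the PHY debug register: link up and no longer in
 * training.
 */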
int dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	if (pci->ops->link_up)
		return pci->ops->link_up(pci);

	val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1);
	return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
		(!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
}

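/*
 * Common core setup: read "num-lanes" from the device tree and program the
 * link mode and link width control fields accordingly.
 */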
void dw_pcie_setup(struct dw_pcie *pci)
{
	int ret;
	u32 val;
	u32 lanes;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	ret = of_property_read_u32(np, "num-lanes", &lanes);
	if (ret)
		lanes = 0;

	/* Set the number of lanes */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
		return;
	}
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	/* Set link width speed control register */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}