blob: 2354ea51e871428211ef13fee8f976844ab1f32c [file] [log] [blame]
Gavin Shan29310e52013-06-20 13:21:13 +08001/*
2 * The file intends to implement the platform dependent EEH operations on
3 * powernv platform. Actually, the powernv was created in order to fully
4 * hypervisor support.
5 *
6 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/atomic.h>
Gavin Shan4cf17442015-02-16 14:45:41 +110015#include <linux/debugfs.h>
Gavin Shan29310e52013-06-20 13:21:13 +080016#include <linux/delay.h>
17#include <linux/export.h>
18#include <linux/init.h>
Alistair Popple79231442015-05-15 14:06:40 +100019#include <linux/interrupt.h>
Gavin Shan29310e52013-06-20 13:21:13 +080020#include <linux/list.h>
21#include <linux/msi.h>
22#include <linux/of.h>
23#include <linux/pci.h>
24#include <linux/proc_fs.h>
25#include <linux/rbtree.h>
26#include <linux/sched.h>
27#include <linux/seq_file.h>
28#include <linux/spinlock.h>
29
30#include <asm/eeh.h>
31#include <asm/eeh_event.h>
32#include <asm/firmware.h>
33#include <asm/io.h>
34#include <asm/iommu.h>
35#include <asm/machdep.h>
36#include <asm/msi_bitmap.h>
37#include <asm/opal.h>
38#include <asm/ppc-pci.h>
Gavin Shan9c0e1ec2016-05-20 16:41:39 +100039#include <asm/pnv-pci.h>
Gavin Shan29310e52013-06-20 13:21:13 +080040
41#include "powernv.h"
42#include "pci.h"
43
/* Set once pnv_eeh_post_init() has registered the OPAL event interrupt */
static bool pnv_eeh_nb_init = false;
/* IRQ mapped for OPAL_EVENT_PCI_ERROR; -EINVAL until registration succeeds */
static int eeh_event_irq = -EINVAL;
Gavin Shan4cf17442015-02-16 14:45:41 +110046
/*
 * pnv_eeh_init - Platform dependent EEH initialization on powernv
 *
 * Checks for OPAL firmware, selects the EEH probe mode and derives
 * global EEH flags from the first PHB in the system. Returns 0 on
 * success, -EINVAL when OPAL isn't available.
 */
static int pnv_eeh_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;

	/* All EEH backend operations here go through OPAL calls */
	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_warn("%s: OPAL is required !\n",
			__func__);
		return -EINVAL;
	}

	/* Set probe mode: probe by device (pci_dn), not by device-tree node */
	eeh_add_flag(EEH_PROBE_MODE_DEV);

	/*
	 * P7IOC blocks PCI config access to frozen PE, but PHB3
	 * doesn't do that. So we have to selectively enable I/O
	 * prior to collecting error log.
	 */
	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->model == PNV_PHB_MODEL_P7IOC)
			eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);

		/*
		 * PE#0 should be regarded as valid by EEH core
		 * if it's not the reserved one. Currently, we
		 * have the reserved PE#255 and PE#127 for PHB3
		 * and P7IOC separately. So we should regard
		 * PE#0 as valid for PHB3 and P7IOC.
		 */
		if (phb->ioda.reserved_pe_idx != 0)
			eeh_add_flag(EEH_VALID_PE_ZERO);

		/*
		 * Only the first PHB is examined: the flags above are
		 * global, and the first PHB is taken as representative
		 * of the platform's PHB model.
		 */
		break;
	}

	return 0;
}
87
/*
 * pnv_eeh_event - Interrupt handler for the OPAL PCI-error event
 * @irq: the OPAL event interrupt number
 * @data: unused handler cookie
 *
 * Masks itself and hands a "special" (NULL-PE) failure event to the
 * EEH core for processing.
 */
static irqreturn_t pnv_eeh_event(int irq, void *data)
{
	/*
	 * We simply send a special EEH event if EEH has been
	 * enabled. We don't care about EEH events until we've
	 * finished processing the outstanding ones. Event processing
	 * gets unmasked in next_error() if EEH is enabled.
	 */
	disable_irq_nosync(irq);

	if (eeh_enabled())
		eeh_send_failure_event(NULL);

	return IRQ_HANDLED;
}
103
Gavin Shan4cf17442015-02-16 14:45:41 +1100104#ifdef CONFIG_DEBUG_FS
105static ssize_t pnv_eeh_ei_write(struct file *filp,
106 const char __user *user_buf,
107 size_t count, loff_t *ppos)
108{
109 struct pci_controller *hose = filp->private_data;
110 struct eeh_dev *edev;
111 struct eeh_pe *pe;
112 int pe_no, type, func;
113 unsigned long addr, mask;
114 char buf[50];
115 int ret;
116
117 if (!eeh_ops || !eeh_ops->err_inject)
118 return -ENXIO;
119
120 /* Copy over argument buffer */
121 ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
122 if (!ret)
123 return -EFAULT;
124
125 /* Retrieve parameters */
126 ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
127 &pe_no, &type, &func, &addr, &mask);
128 if (ret != 5)
129 return -EINVAL;
130
131 /* Retrieve PE */
132 edev = kzalloc(sizeof(*edev), GFP_KERNEL);
133 if (!edev)
134 return -ENOMEM;
135 edev->phb = hose;
136 edev->pe_config_addr = pe_no;
137 pe = eeh_pe_get(edev);
138 kfree(edev);
139 if (!pe)
140 return -ENODEV;
141
142 /* Do error injection */
143 ret = eeh_ops->err_inject(pe, type, func, addr, mask);
144 return ret < 0 ? ret : count;
145}
146
/* debugfs "err_injct" file: write-only trigger for EEH error injection */
static const struct file_operations pnv_eeh_ei_fops = {
	.open = simple_open,
	.llseek = no_llseek,
	.write = pnv_eeh_ei_write,
};
152
153static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
154{
155 struct pci_controller *hose = data;
156 struct pnv_phb *phb = hose->private_data;
157
158 out_be64(phb->regs + offset, val);
159 return 0;
160}
161
162static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
163{
164 struct pci_controller *hose = data;
165 struct pnv_phb *phb = hose->private_data;
166
167 *val = in_be64(phb->regs + offset);
168 return 0;
169}
170
/*
 * Generate the debugfs attribute (getter, setter and file_operations
 * named pnv_eeh_dbgfs_ops_<name>) for one error-injection register at
 * PHB register offset @reg. The values are presented in hex.
 */
#define PNV_EEH_DBGFS_ENTRY(name, reg)				\
static int pnv_eeh_dbgfs_set_##name(void *data, u64 val)	\
{								\
	return pnv_eeh_dbgfs_set(data, reg, val);		\
}								\
								\
static int pnv_eeh_dbgfs_get_##name(void *data, u64 *val)	\
{								\
	return pnv_eeh_dbgfs_get(data, reg, val);		\
}								\
								\
DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_dbgfs_ops_##name,		\
			pnv_eeh_dbgfs_get_##name,		\
			pnv_eeh_dbgfs_set_##name,		\
			"0x%llx\n")

/*
 * Error-injection registers: outbound plus the two inbound paths.
 * NOTE(review): the offsets 0xD10/0xD90/0xE10 presumably match the
 * PHB error-injection register block — confirm against the PHB spec.
 */
PNV_EEH_DBGFS_ENTRY(outb, 0xD10);
PNV_EEH_DBGFS_ENTRY(inbA, 0xD90);
PNV_EEH_DBGFS_ENTRY(inbB, 0xE10);
Gavin Shan4cf17442015-02-16 14:45:41 +1100190
Gavin Shan4cf17442015-02-16 14:45:41 +1100191#endif /* CONFIG_DEBUG_FS */
192
/**
 * pnv_eeh_post_init - EEH platform dependent post initialization
 *
 * EEH platform dependent post initialization on powernv. When
 * the function is called, the EEH PEs and devices should have
 * been built. If the I/O cache stuff has been built, EEH is
 * ready to supply service.
 */
static int pnv_eeh_post_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	int ret = 0;

	/* Register OPAL event notifier (only on the first call) */
	if (!pnv_eeh_nb_init) {
		eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR));
		if (eeh_event_irq < 0) {
			pr_err("%s: Can't register OPAL event interrupt (%d)\n",
			       __func__, eeh_event_irq);
			return eeh_event_irq;
		}

		ret = request_irq(eeh_event_irq, pnv_eeh_event,
				  IRQ_TYPE_LEVEL_HIGH, "opal-eeh", NULL);
		if (ret < 0) {
			/* Undo the mapping created by opal_event_request() */
			irq_dispose_mapping(eeh_event_irq);
			pr_err("%s: Can't request OPAL event interrupt (%d)\n",
			       __func__, eeh_event_irq);
			return ret;
		}

		pnv_eeh_nb_init = true;
	}

	/* No point receiving PCI error events while EEH is disabled */
	if (!eeh_enabled())
		disable_irq(eeh_event_irq);

	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		/*
		 * If EEH is enabled, we're going to rely on that.
		 * Otherwise, we restore to conventional mechanism
		 * to clear frozen PE during PCI config access.
		 */
		if (eeh_enabled())
			phb->flags |= PNV_PHB_FLAG_EEH;
		else
			phb->flags &= ~PNV_PHB_FLAG_EEH;

		/* Create debugfs entries (once per PHB, if it has a dir) */
#ifdef CONFIG_DEBUG_FS
		if (phb->has_dbgfs || !phb->dbgfs)
			continue;

		phb->has_dbgfs = 1;
		debugfs_create_file("err_injct", 0200,
				    phb->dbgfs, hose,
				    &pnv_eeh_ei_fops);

		debugfs_create_file("err_injct_outbound", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_outb);
		debugfs_create_file("err_injct_inboundA", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_inbA);
		debugfs_create_file("err_injct_inboundB", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_inbB);
#endif /* CONFIG_DEBUG_FS */
	}

	return ret;
}
268
/*
 * pnv_eeh_find_cap - Locate a standard PCI capability via platform accessors
 * @pdn: PCI device node
 * @cap: capability ID (PCI_CAP_ID_*)
 *
 * Walks the device's capability list with pnv_pci_cfg_read() (mirrors
 * pci_find_capability(), which can't be used here because the struct
 * pci_dev may not exist yet). Returns the config-space offset of the
 * capability, or 0 if not found / not supported.
 */
static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = PCI_CAPABILITY_LIST;
	int cnt = 48; /* Maximal number of capabilities */
	u32 status, id;

	if (!pdn)
		return 0;

	/* Check if the device supports capabilities */
	pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	/* cnt bounds the walk so a corrupt list can't loop forever */
	while (cnt--) {
		pnv_pci_cfg_read(pdn, pos, 1, &pos);
		/* Pointers below 0x40 land in the standard header: stop */
		if (pos < 0x40)
			break;

		/* Capability structures are dword aligned */
		pos &= ~3;
		pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;

		/* Found */
		if (id == cap)
			return pos;

		/* Next one */
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}
303
/*
 * pnv_eeh_find_ecap - Locate a PCIe extended capability via platform accessors
 * @pdn: PCI device node
 * @cap: extended capability ID (PCI_EXT_CAP_ID_*)
 *
 * Walks the extended config space (offsets 0x100 and up, 8 bytes per
 * header hop bound) of a PCIe device. Returns the offset of the
 * capability, or 0 when absent or when the device has no PCIe cap.
 */
static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256, ttl = (4096 - 256) / 8;

	/* Extended capabilities only exist on PCIe devices */
	if (!edev || !edev->pcie_cap)
		return 0;
	if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	/* ttl bounds the walk so a corrupt chain can't loop forever */
	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		/* A next-pointer below 0x100 terminates the chain */
		if (pos < 256)
			break;

		if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
331
/**
 * pnv_eeh_probe - Do probe on PCI device
 * @pdn: PCI device node
 * @data: unused
 *
 * When EEH module is installed during system boot, all PCI devices
 * are checked one by one to see if it supports EEH. The function
 * is introduced for the purpose. By default, EEH has been enabled
 * on all PCI devices. That's to say, we only need do necessary
 * initialization on the corresponding eeh device and create PE
 * accordingly.
 *
 * It's notable that's unsafe to retrieve the EEH device through
 * the corresponding PCI device. During the PCI device hotplug, which
 * was possibly triggered by EEH core, the binding between EEH device
 * and the PCI device isn't built yet.
 */
static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
{
	struct pci_controller *hose = pdn->phb;
	struct pnv_phb *phb = hose->private_data;
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	uint32_t pcie_flags;
	int ret;

	/*
	 * When probing the root bridge, which doesn't have any
	 * subordinate PCI devices. We don't have OF node for
	 * the root bridge. So it's not reasonable to continue
	 * the probing. Also skip devices already bound to a PE.
	 */
	if (!edev || edev->pe)
		return NULL;

	/* Skip for PCI-ISA bridge */
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
		return NULL;

	/* Initialize eeh device: cache class code and capability offsets */
	edev->class_code = pdn->class_code;
	edev->mode &= 0xFFFFFF00;
	edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
	edev->af_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_AF);
	edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
	if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			/* Classify the bridge by its PCIe port type */
			pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	/* BDF-style config address; reverse-map it to the PE number */
	edev->config_addr = (pdn->busno << 8) | (pdn->devfn);
	edev->pe_config_addr = phb->ioda.pe_rmap[edev->config_addr];

	/* Create PE */
	ret = eeh_add_to_parent_pe(edev);
	if (ret) {
		pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%d)\n",
			__func__, hose->global_number, pdn->busno,
			PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
		return NULL;
	}

	/*
	 * If the PE contains any one of following adapters, the
	 * PCI config space can't be accessed when dumping EEH log.
	 * Otherwise, we will run into fenced PHB caused by shortage
	 * of outbound credits in the adapter. The PCI config access
	 * should be blocked until PE reset. MMIO access is dropped
	 * by hardware certainly. In order to drop PCI config requests,
	 * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
	 * will be checked in the backend for PE state retrieval. If
	 * the PE becomes frozen for the first time and the flag has
	 * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
	 * that PE to block its config space.
	 *
	 * Broadcom Austin 4-ports NICs (14e4:1657)
	 * Broadcom Shiner 4-ports 1G NICs (14e4:168a)
	 * Broadcom Shiner 2-ports 10G NICs (14e4:168e)
	 */
	if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x1657) ||
	    (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x168a) ||
	    (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x168e))
		edev->pe->state |= EEH_PE_CFG_RESTRICTED;

	/*
	 * Cache the PE primary bus, which can't be fetched when
	 * full hotplug is in progress. In that case, all child
	 * PCI devices of the PE are expected to be removed prior
	 * to PE reset.
	 */
	if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
		edev->pe->bus = pci_find_bus(hose->global_number,
					     pdn->busno);
		if (edev->pe->bus)
			edev->pe->state |= EEH_PE_PRI_BUS;
	}

	/*
	 * Enable EEH explicitly so that we will do EEH check
	 * while accessing I/O stuff
	 */
	eeh_add_flag(EEH_ENABLED);

	/* Save memory bars */
	eeh_save_bars(edev);

	return NULL;
}
451
/**
 * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, following options are support according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	bool freeze_pe = false;
	int opt;
	s64 rc;

	/* Map the EEH option onto an OPAL freeze action */
	switch (option) {
	case EEH_OPT_DISABLE:
		return -EPERM;	/* EEH can't be disabled on powernv */
	case EEH_OPT_ENABLE:
		return 0;	/* Already enabled; nothing to do */
	case EEH_OPT_THAW_MMIO:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
		break;
	case EEH_OPT_THAW_DMA:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
		break;
	case EEH_OPT_FREEZE_PE:
		freeze_pe = true;
		opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
		break;
	default:
		pr_warn("%s: Invalid option %d\n", __func__, option);
		return -EINVAL;
	}

	/* Freeze master and slave PEs if PHB supports compound PEs */
	if (freeze_pe) {
		if (phb->freeze_pe) {
			phb->freeze_pe(phb, pe->addr);
			return 0;
		}

		/* Otherwise ask the firmware to freeze the PE */
		rc = opal_pci_eeh_freeze_set(phb->opal_id, pe->addr, opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe->addr);
			return -EIO;
		}

		return 0;
	}

	/* Unfreeze master and slave PEs if PHB supports */
	if (phb->unfreeze_pe)
		return phb->unfreeze_pe(phb, pe->addr, opt);

	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe->addr, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld enable %d for PHB#%x-PE#%x\n",
			__func__, rc, option, phb->hose->global_number,
			pe->addr);
		return -EIO;
	}

	return 0;
}
521
522/**
Gavin Shan01f3bfb2015-02-16 14:45:39 +1100523 * pnv_eeh_get_pe_addr - Retrieve PE address
Gavin Shan29310e52013-06-20 13:21:13 +0800524 * @pe: EEH PE
525 *
526 * Retrieve the PE address according to the given tranditional
527 * PCI BDF (Bus/Device/Function) address.
528 */
Gavin Shan01f3bfb2015-02-16 14:45:39 +1100529static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
Gavin Shan29310e52013-06-20 13:21:13 +0800530{
531 return pe->addr;
532}
533
/*
 * pnv_eeh_get_phb_diag - Fetch PHB diagnostic data into the PE's buffer
 * @pe: EEH PE whose PHB diag-data should be collected
 *
 * Asks OPAL to fill pe->data (PNV_PCI_DIAG_BUF_SIZE bytes) with the
 * PHB's diagnostic dump; failures are logged but not propagated.
 */
static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	s64 rc;

	rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
					 PNV_PCI_DIAG_BUF_SIZE);
	if (rc != OPAL_SUCCESS)
		pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
			__func__, rc, pe->phb->global_number);
}
545
/*
 * pnv_eeh_get_phb_state - Retrieve the state of a PHB PE
 * @pe: PHB PE
 *
 * Queries the firmware freeze status for the PHB itself. Returns the
 * EEH_STATE_* bitmask, or EEH_STATE_NOT_SUPPORT on firmware failure.
 */
static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	s64 rc;
	int result = 0;

	rc = opal_pci_eeh_freeze_status(phb->opal_id,
					pe->addr,
					&fstate,
					&pcierr,
					NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting PHB#%x state\n",
			__func__, rc, phb->hose->global_number);
		return EEH_STATE_NOT_SUPPORT;
	}

	/*
	 * Check PHB state. If the PHB is frozen for the
	 * first time, to dump the PHB diag-data.
	 */
	if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
	} else if (!(pe->state & EEH_PE_ISOLATED)) {
		/* First sight of the error: isolate and capture diag-data */
		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}
584
/*
 * pnv_eeh_get_pe_state - Retrieve the state of an ordinary (non-PHB) PE
 * @pe: EEH PE
 *
 * Maps the firmware's freeze state onto the EEH core's EEH_STATE_*
 * bitmask, marking the PE isolated and collecting diag-data the first
 * time it is seen fully frozen.
 */
static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	s64 rc;
	int result;

	/*
	 * We don't clobber hardware frozen state until PE
	 * reset is completed. In order to keep EEH core
	 * moving forward, we have to return operational
	 * state during PE reset.
	 */
	if (pe->state & EEH_PE_RESET) {
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		return result;
	}

	/*
	 * Fetch PE state from hardware. If the PHB
	 * supports compound PE, let it handle that.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe->addr);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe->addr,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
				__func__, rc, phb->hose->global_number,
				pe->addr);
			return EEH_STATE_NOT_SUPPORT;
		}
	}

	/* Figure out state: translate OPAL freeze state to EEH bitmask */
	switch (fstate) {
	case OPAL_EEH_STOPPED_NOT_FROZEN:
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_FREEZE:
		result = (EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_DMA_FREEZE:
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_MMIO_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
		result = 0;
		break;
	case OPAL_EEH_STOPPED_RESET:
		result = EEH_STATE_RESET_ACTIVE;
		break;
	case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
		result = EEH_STATE_UNAVAILABLE;
		break;
	case OPAL_EEH_STOPPED_PERM_UNAVAIL:
		result = EEH_STATE_NOT_SUPPORT;
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
		pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
			__func__, phb->hose->global_number,
			pe->addr, fstate);
	}

	/*
	 * If PHB supports compound PE, to freeze all
	 * slave PEs for consistency.
	 *
	 * If the PE is switching to frozen state for the
	 * first time, to dump the PHB diag-data.
	 */
	if (!(result & EEH_STATE_NOT_SUPPORT) &&
	    !(result & EEH_STATE_UNAVAILABLE) &&
	    !(result & EEH_STATE_MMIO_ACTIVE) &&
	    !(result & EEH_STATE_DMA_ACTIVE)  &&
	    !(pe->state & EEH_PE_ISOLATED)) {
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe->addr);

		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}
686
Gavin Shan29310e52013-06-20 13:21:13 +0800687/**
Gavin Shan01f3bfb2015-02-16 14:45:39 +1100688 * pnv_eeh_get_state - Retrieve PE state
Gavin Shan29310e52013-06-20 13:21:13 +0800689 * @pe: EEH PE
690 * @delay: delay while PE state is temporarily unavailable
691 *
692 * Retrieve the state of the specified PE. For IODA-compitable
693 * platform, it should be retrieved from IODA table. Therefore,
694 * we prefer passing down to hardware implementation to handle
695 * it.
696 */
Gavin Shan01f3bfb2015-02-16 14:45:39 +1100697static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
Gavin Shan29310e52013-06-20 13:21:13 +0800698{
Gavin Shan40ae5f62015-02-16 14:45:45 +1100699 int ret;
Gavin Shan29310e52013-06-20 13:21:13 +0800700
Gavin Shan40ae5f62015-02-16 14:45:45 +1100701 if (pe->type & EEH_PE_PHB)
702 ret = pnv_eeh_get_phb_state(pe);
703 else
704 ret = pnv_eeh_get_pe_state(pe);
Gavin Shan29310e52013-06-20 13:21:13 +0800705
Gavin Shan40ae5f62015-02-16 14:45:45 +1100706 if (!delay)
707 return ret;
708
709 /*
710 * If the PE state is temporarily unavailable,
711 * to inform the EEH core delay for default
712 * period (1 second)
713 */
714 *delay = 0;
715 if (ret & EEH_STATE_UNAVAILABLE)
716 *delay = 1000;
Gavin Shan29310e52013-06-20 13:21:13 +0800717
718 return ret;
719}
720
Gavin Shanebe22532016-05-20 16:41:38 +1000721static s64 pnv_eeh_poll(unsigned long id)
Gavin Shancadf3642015-02-16 14:45:47 +1100722{
723 s64 rc = OPAL_HARDWARE;
724
725 while (1) {
Gavin Shanebe22532016-05-20 16:41:38 +1000726 rc = opal_pci_poll(id);
Gavin Shancadf3642015-02-16 14:45:47 +1100727 if (rc <= 0)
728 break;
729
730 if (system_state < SYSTEM_RUNNING)
731 udelay(1000 * rc);
732 else
733 msleep(rc);
734 }
735
736 return rc;
737}
738
/*
 * pnv_eeh_phb_reset - Issue a complete reset on a PHB
 * @hose: PCI controller to reset
 * @option: EEH_RESET_FUNDAMENTAL/HOT assert, EEH_RESET_DEACTIVATE deassert
 *
 * Returns 0 on success or -EIO on firmware failure. Note that an
 * unrecognized @option leaves rc at OPAL_HARDWARE and fails with -EIO.
 */
int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/* Issue PHB complete reset request */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/*
	 * Poll state of the PHB until the request is done
	 * successfully. The PHB reset is usually PHB complete
	 * reset followed by hot reset on root bus. So we also
	 * need the PCI bus settlement delay.
	 */
	if (rc > 0)
		rc = pnv_eeh_poll(phb->opal_id);
	if (option == EEH_RESET_DEACTIVATE) {
		/* Busy-wait before the scheduler is up, sleep otherwise */
		if (system_state < SYSTEM_RUNNING)
			udelay(1000 * EEH_PE_RST_SETTLE_TIME);
		else
			msleep(EEH_PE_RST_SETTLE_TIME);
	}
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
780
/*
 * pnv_eeh_root_reset - Reset the root bus under a PHB via firmware
 * @hose: PCI controller whose root bus should be reset
 * @option: EEH_RESET_FUNDAMENTAL/HOT assert, EEH_RESET_DEACTIVATE deassert
 *
 * Returns 0 on success or -EIO on firmware failure. An unrecognized
 * @option leaves rc at OPAL_HARDWARE and fails with -EIO.
 */
static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/*
	 * During the reset deassert time, we needn't care
	 * the reset scope because the firmware does nothing
	 * for fundamental or hot reset during deassert phase.
	 */
	if (option == EEH_RESET_FUNDAMENTAL)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_FUNDAMENTAL,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_HOT,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_HOT,
				    OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/* Poll state of the PHB until the request is done */
	if (rc > 0)
		rc = pnv_eeh_poll(phb->opal_id);
	if (option == EEH_RESET_DEACTIVATE)
		msleep(EEH_PE_RST_SETTLE_TIME);
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
820
/*
 * __pnv_eeh_bridge_reset - Reset the secondary bus of a bridge by hand
 * @dev: PCI bridge device
 * @option: EEH_RESET_FUNDAMENTAL/HOT assert, EEH_RESET_DEACTIVATE deassert
 *
 * Toggles the bridge's secondary-bus-reset bit through config space.
 * While the reset is asserted the link is expected to drop, so the
 * AER surprise-down error is masked around the operation to avoid
 * spurious linkDown reports.
 */
static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
{
	struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	int aer = edev ? edev->aer_cap : 0;
	u32 ctrl;

	pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
		 __func__, pci_domain_nr(dev->bus),
		 dev->bus->number, option);

	switch (option) {
	case EEH_RESET_FUNDAMENTAL:
	case EEH_RESET_HOT:
		/* Don't report linkDown event */
		if (aer) {
			eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl |= PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		/* Assert secondary bus reset and hold it */
		eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		/* Deassert the reset and let the bus settle */
		eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_SETTLE_TIME);

		/* Continue reporting linkDown event */
		if (aer) {
			eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl &= ~PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		break;
	}

	return 0;
}
871
/*
 * pnv_eeh_bridge_reset - Reset the secondary bus of a bridge
 * @pdev: PCI bridge device
 * @option: EEH_RESET_FUNDAMENTAL/HOT/DEACTIVATE
 *
 * Prefers a firmware-driven slot reset when the device-tree marks the
 * slot with "ibm,reset-by-firmware"; otherwise falls back to toggling
 * the bridge control register directly.
 */
static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct device_node *dn = pci_device_to_OF_node(pdev);
	uint64_t id = PCI_SLOT_ID(phb->opal_id,
				  (pdev->bus->number << 8) | pdev->devfn);
	uint8_t scope;
	int64_t rc;

	/* Hot reset to the bus if firmware cannot handle */
	if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL))
		return __pnv_eeh_bridge_reset(pdev, option);

	switch (option) {
	case EEH_RESET_FUNDAMENTAL:
		scope = OPAL_RESET_PCI_FUNDAMENTAL;
		break;
	case EEH_RESET_HOT:
		scope = OPAL_RESET_PCI_HOT;
		break;
	case EEH_RESET_DEACTIVATE:
		/* Firmware handles deassert itself; nothing to do */
		return 0;
	default:
		dev_dbg(&pdev->dev, "%s: Unsupported reset %d\n",
			__func__, option);
		return -EINVAL;
	}

	/* rc > 0 means the firmware wants us to poll for completion */
	rc = opal_pci_reset(id, scope, OPAL_ASSERT_RESET);
	if (rc <= OPAL_SUCCESS)
		goto out;

	rc = pnv_eeh_poll(id);
out:
	return (rc == OPAL_SUCCESS) ? 0 : -EIO;
}
909
Gavin Shancadf3642015-02-16 14:45:47 +1100910void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
911{
Michael Ellerman848912e2016-05-12 19:43:37 +1000912 struct pci_controller *hose;
913
914 if (pci_is_root_bus(dev->bus)) {
915 hose = pci_bus_to_host(dev->bus);
916 pnv_eeh_root_reset(hose, EEH_RESET_HOT);
917 pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
918 } else {
919 pnv_eeh_bridge_reset(dev, EEH_RESET_HOT);
920 pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
921 }
Gavin Shancadf3642015-02-16 14:45:47 +1100922}
923
/*
 * Poll the config-space register at @pos until the bits in @mask read
 * back clear, sleeping with exponential backoff (100/200/400/800 ms).
 * Used to drain pending transactions before issuing a (AF) FLR; @type
 * only decorates the warning printed on timeout ("" or "AF").
 */
static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type,
				     int pos, u16 mask)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	int i, status = 0;

	/* Wait for Transaction Pending bit to be cleared */
	for (i = 0; i < 4; i++) {
		eeh_ops->read_config(pdn, pos, 2, &status);
		if (!(status & mask))
			return;

		msleep((1 << i) * 100);
	}

	/* Timed out (~1.5s total); the reset proceeds regardless */
	pr_warn("%s: Pending transaction while issuing %sFLR to %04x:%02x:%02x.%01x\n",
		__func__, type,
		edev->phb->global_number, pdn->busno,
		PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
}
944
/*
 * Issue a PCIe Function Level Reset (FLR) to the device behind @pdn.
 *
 * EEH_RESET_HOT/EEH_RESET_FUNDAMENTAL: drain pending transactions,
 * then assert the FLR bit and hold. EEH_RESET_DEACTIVATE: clear the
 * FLR bit and wait for the device to settle.
 *
 * Returns 0 on success, -ENOTTY if the device has no PCIe capability
 * or doesn't advertise FLR support in DEVCAP.
 */
static int pnv_eeh_do_flr(struct pci_dn *pdn, int option)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 reg = 0;

	if (WARN_ON(!edev->pcie_cap))
		return -ENOTTY;

	/* The device must advertise FLR support */
	eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP, 4, &reg);
	if (!(reg & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	switch (option) {
	case EEH_RESET_HOT:
	case EEH_RESET_FUNDAMENTAL:
		/* Drain outstanding transactions before asserting the reset */
		pnv_eeh_wait_for_pending(pdn, "",
					 edev->pcie_cap + PCI_EXP_DEVSTA,
					 PCI_EXP_DEVSTA_TRPND);
		eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
				     4, &reg);
		reg |= PCI_EXP_DEVCTL_BCR_FLR;
		eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
				      4, reg);
		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
				     4, &reg);
		reg &= ~PCI_EXP_DEVCTL_BCR_FLR;
		eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
				      4, reg);
		msleep(EEH_PE_RST_SETTLE_TIME);
		break;
	}

	return 0;
}
982
/*
 * Issue an Advanced Feature (AF) FLR to the device behind @pdn; used
 * as the fallback when the device lacks PCIe FLR support (see
 * pnv_eeh_reset_vf_pe()).
 *
 * Returns 0 on success, -ENOTTY if the AF capability is absent or the
 * device doesn't support both transaction-pending reporting and FLR.
 */
static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 cap = 0;

	if (WARN_ON(!edev->af_cap))
		return -ENOTTY;

	eeh_ops->read_config(pdn, edev->af_cap + PCI_AF_CAP, 1, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	switch (option) {
	case EEH_RESET_HOT:
	case EEH_RESET_FUNDAMENTAL:
		/*
		 * Wait for Transaction Pending bit to clear. A word-aligned
		 * test is used, so we use the control offset rather than
		 * status and shift the test bit to match.
		 */
		pnv_eeh_wait_for_pending(pdn, "AF",
					 edev->af_cap + PCI_AF_CTRL,
					 PCI_AF_STATUS_TP << 8);
		eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL,
				      1, PCI_AF_CTRL_FLR);
		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		/* Writing 0 deasserts the AF FLR */
		eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL, 1, 0);
		msleep(EEH_PE_RST_SETTLE_TIME);
		break;
	}

	return 0;
}
1018
1019static int pnv_eeh_reset_vf_pe(struct eeh_pe *pe, int option)
1020{
1021 struct eeh_dev *edev;
1022 struct pci_dn *pdn;
1023 int ret;
1024
1025 /* The VF PE should have only one child device */
1026 edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, list);
1027 pdn = eeh_dev_to_pdn(edev);
1028 if (!pdn)
1029 return -ENXIO;
1030
1031 ret = pnv_eeh_do_flr(pdn, option);
1032 if (!ret)
1033 return ret;
1034
1035 return pnv_eeh_do_af_flr(pdn, option);
1036}
1037
/**
 * pnv_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Do reset on the indicated PE. For PCI bus sensitive PE,
 * we need to reset the parent p2p bridge. The PHB has to
 * be reinitialized if the p2p bridge is root bridge. For
 * PCI device sensitive PE, we will try to reset the device
 * through FLR. For now, we don't have OPAL APIs to do HARD
 * reset yet, so all reset would be SOFT (HOT) reset.
 */
static int pnv_eeh_reset(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb;
	struct pci_bus *bus;
	int64_t rc;

	/*
	 * For PHB reset, we always have complete reset. For those PEs whose
	 * primary bus derived from root complex (root bus) or root port
	 * (usually bus#1), we apply hot or fundamental reset on the root port.
	 * For other PEs, we always have hot reset on the PE primary bus.
	 *
	 * Here, we have different design to pHyp, which always clear the
	 * frozen state during PE reset. However, the good idea here from
	 * benh is to keep frozen state before we get PE reset done completely
	 * (until BAR restore). With the frozen state, HW drops illegal IO
	 * or MMIO access, which can incur recursive frozen PE during PE
	 * reset. The side effect is that EEH core has to clear the frozen
	 * state explicitly after BAR restore.
	 */
	if (pe->type & EEH_PE_PHB)
		return pnv_eeh_phb_reset(hose, option);

	/*
	 * The frozen PE might be caused by PAPR error injection
	 * registers, which are expected to be cleared after hitting
	 * frozen PE as stated in the hardware spec. Unfortunately,
	 * that's not true on P7IOC. So we have to clear it manually
	 * to avoid recursive EEH errors during recovery.
	 */
	phb = hose->private_data;
	if (phb->model == PNV_PHB_MODEL_P7IOC &&
	    (option == EEH_RESET_HOT ||
	     option == EEH_RESET_FUNDAMENTAL)) {
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_ERROR,
				    OPAL_ASSERT_RESET);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clearing error injection registers\n",
				__func__, rc);
			return -EIO;
		}
	}

	/* VF PEs are reset with a function-level reset on the VF itself */
	if (pe->type & EEH_PE_VF)
		return pnv_eeh_reset_vf_pe(pe, option);

	bus = eeh_pe_bus_get(pe);
	if (!bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
			__func__, pe->phb->global_number, pe->addr);
		return -EIO;
	}

	/*
	 * PEs sitting directly under the root complex or a root port
	 * are reset through the root port; everything else through
	 * the upstream p2p bridge.
	 */
	if (pci_is_root_bus(bus) ||
	    pci_is_root_bus(bus->parent))
		return pnv_eeh_root_reset(hose, option);

	return pnv_eeh_bridge_reset(bus->self, option);
}
1111
1112/**
Gavin Shan01f3bfb2015-02-16 14:45:39 +11001113 * pnv_eeh_wait_state - Wait for PE state
Gavin Shan29310e52013-06-20 13:21:13 +08001114 * @pe: EEH PE
Wei Yang2ac39902015-04-27 09:25:10 +08001115 * @max_wait: maximal period in millisecond
Gavin Shan29310e52013-06-20 13:21:13 +08001116 *
1117 * Wait for the state of associated PE. It might take some time
1118 * to retrieve the PE's state.
1119 */
Gavin Shan01f3bfb2015-02-16 14:45:39 +11001120static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
Gavin Shan29310e52013-06-20 13:21:13 +08001121{
1122 int ret;
1123 int mwait;
1124
1125 while (1) {
Gavin Shan01f3bfb2015-02-16 14:45:39 +11001126 ret = pnv_eeh_get_state(pe, &mwait);
Gavin Shan29310e52013-06-20 13:21:13 +08001127
1128 /*
1129 * If the PE's state is temporarily unavailable,
1130 * we have to wait for the specified time. Otherwise,
1131 * the PE's state will be returned immediately.
1132 */
1133 if (ret != EEH_STATE_UNAVAILABLE)
1134 return ret;
1135
Gavin Shan29310e52013-06-20 13:21:13 +08001136 if (max_wait <= 0) {
Gavin Shan0dae2742014-07-17 14:41:41 +10001137 pr_warn("%s: Timeout getting PE#%x's state (%d)\n",
1138 __func__, pe->addr, max_wait);
Gavin Shan29310e52013-06-20 13:21:13 +08001139 return EEH_STATE_NOT_SUPPORT;
1140 }
1141
Wei Yange17866d2015-04-27 09:25:11 +08001142 max_wait -= mwait;
Gavin Shan29310e52013-06-20 13:21:13 +08001143 msleep(mwait);
1144 }
1145
1146 return EEH_STATE_NOT_SUPPORT;
1147}
1148
1149/**
Gavin Shan01f3bfb2015-02-16 14:45:39 +11001150 * pnv_eeh_get_log - Retrieve error log
Gavin Shan29310e52013-06-20 13:21:13 +08001151 * @pe: EEH PE
1152 * @severity: temporary or permanent error log
1153 * @drv_log: driver log to be combined with retrieved error log
1154 * @len: length of driver log
1155 *
1156 * Retrieve the temporary or permanent error from the PE.
1157 */
Gavin Shan01f3bfb2015-02-16 14:45:39 +11001158static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
1159 char *drv_log, unsigned long len)
Gavin Shan29310e52013-06-20 13:21:13 +08001160{
Gavin Shan95edcde2015-02-16 14:45:42 +11001161 if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
1162 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
Gavin Shan29310e52013-06-20 13:21:13 +08001163
Gavin Shan95edcde2015-02-16 14:45:42 +11001164 return 0;
Gavin Shan29310e52013-06-20 13:21:13 +08001165}
1166
/**
 * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctional PE would be recovered
 * again.
 */
static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
{
	/* No-op on powernv: nothing to reconfigure from the kernel side */
	return 0;
}
1179
1180/**
Gavin Shan01f3bfb2015-02-16 14:45:39 +11001181 * pnv_pe_err_inject - Inject specified error to the indicated PE
Gavin Shan131c1232014-09-30 12:38:56 +10001182 * @pe: the indicated PE
1183 * @type: error type
1184 * @func: specific error type
1185 * @addr: address
1186 * @mask: address mask
1187 *
1188 * The routine is called to inject specified error, which is
1189 * determined by @type and @func, to the indicated PE for
1190 * testing purpose.
1191 */
Gavin Shan01f3bfb2015-02-16 14:45:39 +11001192static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
1193 unsigned long addr, unsigned long mask)
Gavin Shan131c1232014-09-30 12:38:56 +10001194{
1195 struct pci_controller *hose = pe->phb;
1196 struct pnv_phb *phb = hose->private_data;
Gavin Shanfa646c32015-02-16 14:45:40 +11001197 s64 rc;
Gavin Shan131c1232014-09-30 12:38:56 +10001198
Gavin Shanfa646c32015-02-16 14:45:40 +11001199 if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
1200 type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
1201 pr_warn("%s: Invalid error type %d\n",
1202 __func__, type);
1203 return -ERANGE;
1204 }
Gavin Shan131c1232014-09-30 12:38:56 +10001205
Gavin Shanfa646c32015-02-16 14:45:40 +11001206 if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
1207 func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
1208 pr_warn("%s: Invalid error function %d\n",
1209 __func__, func);
1210 return -ERANGE;
1211 }
1212
1213 /* Firmware supports error injection ? */
1214 if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
1215 pr_warn("%s: Firmware doesn't support error injection\n",
1216 __func__);
1217 return -ENXIO;
1218 }
1219
1220 /* Do error injection */
1221 rc = opal_pci_err_inject(phb->opal_id, pe->addr,
1222 type, func, addr, mask);
1223 if (rc != OPAL_SUCCESS) {
1224 pr_warn("%s: Failure %lld injecting error "
1225 "%d-%d to PHB#%x-PE#%x\n",
1226 __func__, rc, type, func,
1227 hose->global_number, pe->addr);
1228 return -EIO;
1229 }
1230
1231 return 0;
Gavin Shan131c1232014-09-30 12:38:56 +10001232}
1233
Gavin Shan0bd78582015-03-17 16:15:07 +11001234static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn)
Gavin Shand2cfbcd2014-10-01 17:07:51 +10001235{
Gavin Shan0bd78582015-03-17 16:15:07 +11001236 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
Gavin Shand2cfbcd2014-10-01 17:07:51 +10001237
1238 if (!edev || !edev->pe)
1239 return false;
1240
Wei Yang9312bc52016-03-04 10:53:09 +11001241 /*
1242 * We will issue FLR or AF FLR to all VFs, which are contained
1243 * in VF PE. It relies on the EEH PCI config accessors. So we
1244 * can't block them during the window.
1245 */
1246 if (edev->physfn && (edev->pe->state & EEH_PE_RESET))
1247 return false;
1248
Gavin Shand2cfbcd2014-10-01 17:07:51 +10001249 if (edev->pe->state & EEH_PE_CFG_BLOCKED)
1250 return true;
1251
1252 return false;
1253}
1254
Gavin Shan0bd78582015-03-17 16:15:07 +11001255static int pnv_eeh_read_config(struct pci_dn *pdn,
Gavin Shan01f3bfb2015-02-16 14:45:39 +11001256 int where, int size, u32 *val)
Gavin Shand2cfbcd2014-10-01 17:07:51 +10001257{
Gavin Shan3532a7412015-03-17 16:15:03 +11001258 if (!pdn)
1259 return PCIBIOS_DEVICE_NOT_FOUND;
1260
Gavin Shan0bd78582015-03-17 16:15:07 +11001261 if (pnv_eeh_cfg_blocked(pdn)) {
Gavin Shand2cfbcd2014-10-01 17:07:51 +10001262 *val = 0xFFFFFFFF;
1263 return PCIBIOS_SET_FAILED;
1264 }
1265
Gavin Shan3532a7412015-03-17 16:15:03 +11001266 return pnv_pci_cfg_read(pdn, where, size, val);
Gavin Shand2cfbcd2014-10-01 17:07:51 +10001267}
1268
Gavin Shan0bd78582015-03-17 16:15:07 +11001269static int pnv_eeh_write_config(struct pci_dn *pdn,
Gavin Shan01f3bfb2015-02-16 14:45:39 +11001270 int where, int size, u32 val)
Gavin Shand2cfbcd2014-10-01 17:07:51 +10001271{
Gavin Shan3532a7412015-03-17 16:15:03 +11001272 if (!pdn)
1273 return PCIBIOS_DEVICE_NOT_FOUND;
1274
Gavin Shan0bd78582015-03-17 16:15:07 +11001275 if (pnv_eeh_cfg_blocked(pdn))
Gavin Shand2cfbcd2014-10-01 17:07:51 +10001276 return PCIBIOS_SET_FAILED;
1277
Gavin Shan3532a7412015-03-17 16:15:03 +11001278 return pnv_pci_cfg_write(pdn, where, size, val);
Gavin Shand2cfbcd2014-10-01 17:07:51 +10001279}
1280
/*
 * Print the GEM and LEM register groups shared by all P7IOC hub
 * diag-data record types; each group is skipped when entirely zero.
 */
static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
{
	/* GEM */
	if (data->gemXfir || data->gemRfir ||
	    data->gemRirqfir || data->gemMask || data->gemRwof)
		pr_info("  GEM: %016llx %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->gemXfir),
			be64_to_cpu(data->gemRfir),
			be64_to_cpu(data->gemRirqfir),
			be64_to_cpu(data->gemMask),
			be64_to_cpu(data->gemRwof));

	/* LEM */
	if (data->lemFir || data->lemErrMask ||
	    data->lemAction0 || data->lemAction1 || data->lemWof)
		pr_info("  LEM: %016llx %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrMask),
			be64_to_cpu(data->lemAction0),
			be64_to_cpu(data->lemAction1),
			be64_to_cpu(data->lemWof));
}
1303
1304static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
1305{
1306 struct pnv_phb *phb = hose->private_data;
1307 struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
1308 long rc;
1309
1310 rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
1311 if (rc != OPAL_SUCCESS) {
1312 pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
1313 __func__, phb->hub_id, rc);
1314 return;
1315 }
1316
Gavin Shana7032132016-08-02 14:10:30 +10001317 switch (be16_to_cpu(data->type)) {
Gavin Shan2a485ad2015-02-16 14:45:46 +11001318 case OPAL_P7IOC_DIAG_TYPE_RGC:
1319 pr_info("P7IOC diag-data for RGC\n\n");
1320 pnv_eeh_dump_hub_diag_common(data);
1321 if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
1322 pr_info(" RGC: %016llx %016llx\n",
1323 be64_to_cpu(data->rgc.rgcStatus),
1324 be64_to_cpu(data->rgc.rgcLdcp));
1325 break;
1326 case OPAL_P7IOC_DIAG_TYPE_BI:
1327 pr_info("P7IOC diag-data for BI %s\n\n",
1328 data->bi.biDownbound ? "Downbound" : "Upbound");
1329 pnv_eeh_dump_hub_diag_common(data);
1330 if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
1331 data->bi.biLdcp2 || data->bi.biFenceStatus)
1332 pr_info(" BI: %016llx %016llx %016llx %016llx\n",
1333 be64_to_cpu(data->bi.biLdcp0),
1334 be64_to_cpu(data->bi.biLdcp1),
1335 be64_to_cpu(data->bi.biLdcp2),
1336 be64_to_cpu(data->bi.biFenceStatus));
1337 break;
1338 case OPAL_P7IOC_DIAG_TYPE_CI:
1339 pr_info("P7IOC diag-data for CI Port %d\n\n",
1340 data->ci.ciPort);
1341 pnv_eeh_dump_hub_diag_common(data);
1342 if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
1343 pr_info(" CI: %016llx %016llx\n",
1344 be64_to_cpu(data->ci.ciPortStatus),
1345 be64_to_cpu(data->ci.ciPortLdcp));
1346 break;
1347 case OPAL_P7IOC_DIAG_TYPE_MISC:
1348 pr_info("P7IOC diag-data for MISC\n\n");
1349 pnv_eeh_dump_hub_diag_common(data);
1350 break;
1351 case OPAL_P7IOC_DIAG_TYPE_I2C:
1352 pr_info("P7IOC diag-data for I2C\n\n");
1353 pnv_eeh_dump_hub_diag_common(data);
1354 break;
1355 default:
1356 pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
1357 __func__, phb->hub_id, data->type);
1358 }
1359}
1360
/*
 * Translate the platform PE number @pe_no on @hose into the EEH core's
 * struct eeh_pe, freezing it (and any inactive ancestors) on the way.
 * On success *pe points at the topmost frozen PE that should be
 * handled. Returns 0 on success, -EEXIST if the EEH core has no PE
 * matching @pe_no.
 */
static int pnv_eeh_get_pe(struct pci_controller *hose,
			  u16 pe_no, struct eeh_pe **pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pnv_pe;
	struct eeh_pe *dev_pe;
	/* Throwaway key used only to look up the real PE below */
	struct eeh_dev edev;

	/*
	 * If PHB supports compound PE, to fetch
	 * the master PE because slave PE is invisible
	 * to EEH core.
	 */
	pnv_pe = &phb->ioda.pe_array[pe_no];
	if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
		pnv_pe = pnv_pe->master;
		WARN_ON(!pnv_pe ||
			!(pnv_pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pnv_pe->pe_number;
	}

	/* Find the PE according to PE# */
	memset(&edev, 0, sizeof(struct eeh_dev));
	edev.phb = hose;
	edev.pe_config_addr = pe_no;
	dev_pe = eeh_pe_get(&edev);
	if (!dev_pe)
		return -EEXIST;

	/* Freeze the (compound) PE */
	*pe = dev_pe;
	if (!(dev_pe->state & EEH_PE_ISOLATED))
		phb->freeze_pe(phb, pe_no);

	/*
	 * At this point, we're sure the (compound) PE should
	 * have been frozen. However, we still need poke until
	 * hitting the frozen PE on top level.
	 */
	dev_pe = dev_pe->parent;
	while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
		int ret;
		int active_flags = (EEH_STATE_MMIO_ACTIVE |
				    EEH_STATE_DMA_ACTIVE);

		/* Fully active parents are left alone */
		ret = eeh_ops->get_state(dev_pe, NULL);
		if (ret <= 0 || (ret & active_flags) == active_flags) {
			dev_pe = dev_pe->parent;
			continue;
		}

		/* Frozen parent PE */
		*pe = dev_pe;
		if (!(dev_pe->state & EEH_PE_ISOLATED))
			phb->freeze_pe(phb, dev_pe->addr);

		/* Next one */
		dev_pe = dev_pe->parent;
	}

	return 0;
}
1423
/**
 * pnv_eeh_next_error - Retrieve next EEH error to handle
 * @pe: Affected PE
 *
 * The function is expected to be called by EEH core while it gets
 * special EEH event (without binding PE). The function calls to
 * OPAL APIs for next error to handle. The informational error is
 * handled internally by platform. However, the dead IOC, dead PHB,
 * fenced PHB and frozen PE should be handled by EEH core eventually.
 */
static int pnv_eeh_next_error(struct eeh_pe **pe)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct eeh_pe *phb_pe, *parent_pe;
	__be64 frozen_pe_no;
	__be16 err_type, severity;
	int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
	long rc;
	int state, ret = EEH_NEXT_ERR_NONE;

	/*
	 * While running here, it's safe to purge the event queue. The
	 * event should still be masked.
	 */
	eeh_remove_event(NULL, false);

	/* Poll every PHB until one reports an actionable error */
	list_for_each_entry(hose, &hose_list, list_node) {
		/*
		 * If the subordinate PCI buses of the PHB has been
		 * removed or is exactly under error recovery, we
		 * needn't take care of it any more.
		 */
		phb = hose->private_data;
		phb_pe = eeh_phb_pe_get(hose);
		if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
			continue;

		rc = opal_pci_next_error(phb->opal_id,
					 &frozen_pe_no, &err_type, &severity);
		if (rc != OPAL_SUCCESS) {
			pr_devel("%s: Invalid return value on "
				 "PHB#%x (0x%lx) from opal_pci_next_error",
				 __func__, hose->global_number, rc);
			continue;
		}

		/* If the PHB doesn't have error, stop processing */
		if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
		    be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
			pr_devel("%s: No error found on PHB#%x\n",
				 __func__, hose->global_number);
			continue;
		}

		/*
		 * Processing the error. We're expecting the error with
		 * highest priority reported upon multiple errors on the
		 * specific PHB.
		 */
		pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
			 __func__, be16_to_cpu(err_type),
			 be16_to_cpu(severity), be64_to_cpu(frozen_pe_no),
			 hose->global_number);
		switch (be16_to_cpu(err_type)) {
		case OPAL_EEH_IOC_ERROR:
			if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
				pr_err("EEH: dead IOC detected\n");
				ret = EEH_NEXT_ERR_DEAD_IOC;
			} else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
				/* Informative only: dump and keep going */
				pr_info("EEH: IOC informative error "
					"detected\n");
				pnv_eeh_get_and_dump_hub_diag(hose);
				ret = EEH_NEXT_ERR_NONE;
			}

			break;
		case OPAL_EEH_PHB_ERROR:
			if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
				*pe = phb_pe;
				pr_err("EEH: dead PHB#%x detected, "
				       "location: %s\n",
					hose->global_number,
					eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_DEAD_PHB;
			} else if (be16_to_cpu(severity) ==
				   OPAL_EEH_SEV_PHB_FENCED) {
				*pe = phb_pe;
				pr_err("EEH: Fenced PHB#%x detected, "
				       "location: %s\n",
					hose->global_number,
					eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_FENCED_PHB;
			} else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
				/* Informative only: dump and keep going */
				pr_info("EEH: PHB#%x informative error "
					"detected, location: %s\n",
					hose->global_number,
					eeh_pe_loc_get(phb_pe));
				pnv_eeh_get_phb_diag(phb_pe);
				pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
				ret = EEH_NEXT_ERR_NONE;
			}

			break;
		case OPAL_EEH_PE_ERROR:
			/*
			 * If we can't find the corresponding PE, we
			 * just try to unfreeze.
			 */
			if (pnv_eeh_get_pe(hose,
					   be64_to_cpu(frozen_pe_no), pe)) {
				pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
					hose->global_number, be64_to_cpu(frozen_pe_no));
				pr_info("EEH: PHB location: %s\n",
					eeh_pe_loc_get(phb_pe));

				/* Dump PHB diag-data */
				rc = opal_pci_get_phb_diag_data2(phb->opal_id,
					phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
				if (rc == OPAL_SUCCESS)
					pnv_pci_dump_phb_diag_data(hose,
							phb->diag.blob);

				/* Try best to clear it */
				opal_pci_eeh_freeze_clear(phb->opal_id,
					be64_to_cpu(frozen_pe_no),
					OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
				ret = EEH_NEXT_ERR_NONE;
			} else if ((*pe)->state & EEH_PE_ISOLATED ||
				   eeh_pe_passed(*pe)) {
				/* Already being recovered or owned by a guest */
				ret = EEH_NEXT_ERR_NONE;
			} else {
				pr_err("EEH: Frozen PE#%x "
				       "on PHB#%x detected\n",
				       (*pe)->addr,
					(*pe)->phb->global_number);
				pr_err("EEH: PE location: %s, "
				       "PHB location: %s\n",
				       eeh_pe_loc_get(*pe),
				       eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_FROZEN_PE;
			}

			break;
		default:
			pr_warn("%s: Unexpected error type %d\n",
				__func__, be16_to_cpu(err_type));
		}

		/*
		 * EEH core will try recover from fenced PHB or
		 * frozen PE. In the time for frozen PE, EEH core
		 * enable IO path for that before collecting logs,
		 * but it ruins the site. So we have to dump the
		 * log in advance here.
		 */
		if ((ret == EEH_NEXT_ERR_FROZEN_PE  ||
		    ret == EEH_NEXT_ERR_FENCED_PHB) &&
		    !((*pe)->state & EEH_PE_ISOLATED)) {
			eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
			pnv_eeh_get_phb_diag(*pe);

			if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
				pnv_pci_dump_phb_diag_data((*pe)->phb,
							   (*pe)->data);
		}

		/*
		 * We probably have the frozen parent PE out there and
		 * we need have to handle frozen parent PE firstly.
		 */
		if (ret == EEH_NEXT_ERR_FROZEN_PE) {
			parent_pe = (*pe)->parent;
			while (parent_pe) {
				/* Hit the ceiling ? */
				if (parent_pe->type & EEH_PE_PHB)
					break;

				/* Frozen parent PE ? */
				state = eeh_ops->get_state(parent_pe, NULL);
				if (state > 0 &&
				    (state & active_flags) != active_flags)
					*pe = parent_pe;

				/* Next parent level */
				parent_pe = parent_pe->parent;
			}

			/* We possibly migrate to another PE */
			eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
		}

		/*
		 * If we have no errors on the specific PHB or only
		 * informative error there, we continue poking it.
		 * Otherwise, we need actions to be taken by upper
		 * layer.
		 */
		if (ret > EEH_NEXT_ERR_INF)
			break;
	}

	/* Unmask the event */
	if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
		enable_irq(eeh_event_irq);

	return ret;
}
1632
Wei Yang0dc28302016-03-04 10:53:10 +11001633static int pnv_eeh_restore_vf_config(struct pci_dn *pdn)
1634{
1635 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
1636 u32 devctl, cmd, cap2, aer_capctl;
1637 int old_mps;
1638
1639 if (edev->pcie_cap) {
1640 /* Restore MPS */
1641 old_mps = (ffs(pdn->mps) - 8) << 5;
1642 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1643 2, &devctl);
1644 devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
1645 devctl |= old_mps;
1646 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1647 2, devctl);
1648
1649 /* Disable Completion Timeout */
1650 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP2,
1651 4, &cap2);
1652 if (cap2 & 0x10) {
1653 eeh_ops->read_config(pdn,
1654 edev->pcie_cap + PCI_EXP_DEVCTL2,
1655 4, &cap2);
1656 cap2 |= 0x10;
1657 eeh_ops->write_config(pdn,
1658 edev->pcie_cap + PCI_EXP_DEVCTL2,
1659 4, cap2);
1660 }
1661 }
1662
1663 /* Enable SERR and parity checking */
1664 eeh_ops->read_config(pdn, PCI_COMMAND, 2, &cmd);
1665 cmd |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1666 eeh_ops->write_config(pdn, PCI_COMMAND, 2, cmd);
1667
1668 /* Enable report various errors */
1669 if (edev->pcie_cap) {
1670 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1671 2, &devctl);
1672 devctl &= ~PCI_EXP_DEVCTL_CERE;
1673 devctl |= (PCI_EXP_DEVCTL_NFERE |
1674 PCI_EXP_DEVCTL_FERE |
1675 PCI_EXP_DEVCTL_URRE);
1676 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1677 2, devctl);
1678 }
1679
1680 /* Enable ECRC generation and check */
1681 if (edev->pcie_cap && edev->aer_cap) {
1682 eeh_ops->read_config(pdn, edev->aer_cap + PCI_ERR_CAP,
1683 4, &aer_capctl);
1684 aer_capctl |= (PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
1685 eeh_ops->write_config(pdn, edev->aer_cap + PCI_ERR_CAP,
1686 4, aer_capctl);
1687 }
1688
1689 return 0;
1690}
1691
Gavin Shan0bd78582015-03-17 16:15:07 +11001692static int pnv_eeh_restore_config(struct pci_dn *pdn)
Gavin Shan9be3becc2014-01-03 17:47:13 +08001693{
Gavin Shan0bd78582015-03-17 16:15:07 +11001694 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
Gavin Shan9be3becc2014-01-03 17:47:13 +08001695 struct pnv_phb *phb;
1696 s64 ret;
1697
1698 if (!edev)
1699 return -EEXIST;
1700
Wei Yang0dc28302016-03-04 10:53:10 +11001701 /*
1702 * We have to restore the PCI config space after reset since the
1703 * firmware can't see SRIOV VFs.
1704 *
1705 * FIXME: The MPS, error routing rules, timeout setting are worthy
1706 * to be exported by firmware in extendible way.
1707 */
1708 if (edev->physfn) {
1709 ret = pnv_eeh_restore_vf_config(pdn);
1710 } else {
1711 phb = edev->phb->private_data;
1712 ret = opal_pci_reinit(phb->opal_id,
1713 OPAL_REINIT_PCI_DEV, edev->config_addr);
1714 }
1715
Gavin Shan9be3becc2014-01-03 17:47:13 +08001716 if (ret) {
1717 pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
1718 __func__, edev->config_addr, ret);
1719 return -EIO;
1720 }
1721
1722 return 0;
1723}
1724
/*
 * Platform-dependent EEH callbacks for powernv, registered with the
 * EEH core by eeh_powernv_init() below.
 */
static struct eeh_ops pnv_eeh_ops = {
	.name                   = "powernv",
	.init                   = pnv_eeh_init,
	.post_init              = pnv_eeh_post_init,
	.probe			= pnv_eeh_probe,
	.set_option             = pnv_eeh_set_option,
	.get_pe_addr            = pnv_eeh_get_pe_addr,
	.get_state              = pnv_eeh_get_state,
	.reset                  = pnv_eeh_reset,
	.wait_state             = pnv_eeh_wait_state,
	.get_log                = pnv_eeh_get_log,
	.configure_bridge       = pnv_eeh_configure_bridge,
	.err_inject		= pnv_eeh_err_inject,
	.read_config            = pnv_eeh_read_config,
	.write_config           = pnv_eeh_write_config,
	.next_error		= pnv_eeh_next_error,
	.restore_config		= pnv_eeh_restore_config
};
1743
Wei Yangc29fa272016-03-04 10:53:08 +11001744void pcibios_bus_add_device(struct pci_dev *pdev)
1745{
1746 struct pci_dn *pdn = pci_get_pdn(pdev);
1747
1748 if (!pdev->is_virtfn)
1749 return;
1750
1751 /*
1752 * The following operations will fail if VF's sysfs files
1753 * aren't created or its resources aren't finalized.
1754 */
1755 eeh_add_device_early(pdn);
1756 eeh_add_device_late(pdev);
1757 eeh_sysfs_add_device(pdev);
1758}
1759
Wei Yang0dc28302016-03-04 10:53:10 +11001760#ifdef CONFIG_PCI_IOV
1761static void pnv_pci_fixup_vf_mps(struct pci_dev *pdev)
1762{
1763 struct pci_dn *pdn = pci_get_pdn(pdev);
1764 int parent_mps;
1765
1766 if (!pdev->is_virtfn)
1767 return;
1768
1769 /* Synchronize MPS for VF and PF */
1770 parent_mps = pcie_get_mps(pdev->physfn);
1771 if ((128 << pdev->pcie_mpss) >= parent_mps)
1772 pcie_set_mps(pdev, parent_mps);
1773 pdn->mps = pcie_get_mps(pdev);
1774}
1775DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps);
1776#endif /* CONFIG_PCI_IOV */
1777
Gavin Shan29310e52013-06-20 13:21:13 +08001778/**
1779 * eeh_powernv_init - Register platform dependent EEH operations
1780 *
1781 * EEH initialization on powernv platform. This function should be
1782 * called before any EEH related functions.
1783 */
1784static int __init eeh_powernv_init(void)
1785{
1786 int ret = -EINVAL;
1787
Gavin Shanbb593c02014-07-17 14:41:43 +10001788 eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
Gavin Shan01f3bfb2015-02-16 14:45:39 +11001789 ret = eeh_ops_register(&pnv_eeh_ops);
Gavin Shan29310e52013-06-20 13:21:13 +08001790 if (!ret)
1791 pr_info("EEH: PowerNV platform initialized\n");
1792 else
1793 pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);
1794
1795 return ret;
1796}
Michael Ellermanb14726c2014-07-15 22:22:24 +10001797machine_early_initcall(powernv, eeh_powernv_init);