// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>

/** Overview:
 *  EEH error states may be detected within exception handlers;
 *  however, the recovery processing needs to occur asynchronously
 *  in a normal kernel context and not an interrupt context.
 *  This pair of routines creates an event and queues it onto an
 *  event list, from which the eehd kernel thread drives recovery.
 */

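/*
 * A minimal usage sketch, not part of this file: a detection path
 * running in interrupt context hands the frozen PE to the queue and
 * returns immediately, leaving recovery to the eehd thread.  The
 * names example_err_irq() and example_check_slot() are hypothetical.
 *
 *	static irqreturn_t example_err_irq(int irq, void *data)
 *	{
 *		struct eeh_pe *pe = example_check_slot(data);
 *
 *		if (pe)
 *			eeh_send_failure_event(pe);	// queue, don't recover here
 *		return IRQ_HANDLED;
 *	}
 */
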
static DEFINE_SPINLOCK(eeh_eventlist_lock);
static DECLARE_COMPLETION(eeh_eventlist_event);
static LIST_HEAD(eeh_eventlist);

/**
 * eeh_event_handler - Dispatch EEH events.
 * @dummy: unused
 *
 * The detection of a frozen slot can occur inside an interrupt,
 * where it can be hard to do anything about it. The goal of this
 * routine is to pull these detection events out of the context
 * of the interrupt handler, and re-dispatch them for processing
 * at a later time in a normal context.
 */
static int eeh_event_handler(void *dummy)
{
	unsigned long flags;
	struct eeh_event *event;

	while (!kthread_should_stop()) {
		if (wait_for_completion_interruptible(&eeh_eventlist_event))
			break;

		/* Fetch EEH event from the queue */
		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		event = NULL;
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next,
					   struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		if (!event)
			continue;

		/* We might have an event without a bound PE */
		if (event->pe)
			eeh_handle_normal_event(event->pe);
		else
			eeh_handle_special_event();

		kfree(event);
	}

	return 0;
}
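
/*
 * Note on the queueing scheme above: a struct completion carries a
 * "done" count, so each complete() call in __eeh_send_failure_event()
 * lets exactly one wait_for_completion_interruptible() in the handler
 * return.  The completion thus acts as a counter of queued events,
 * and no wakeup is lost when several events are posted back to back.
 */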

/**
 * eeh_event_init - Start kernel thread to handle EEH events
 *
 * This routine is called to start the kernel thread for processing
 * EEH events.
 */
int eeh_event_init(void)
{
	struct task_struct *t;
	int ret = 0;

	t = kthread_run(eeh_event_handler, NULL, "eehd");
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		pr_err("%s: Failed to start EEH daemon (%d)\n",
		       __func__, ret);
		return ret;
	}

	return 0;
}
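
/*
 * Illustrative call site, an assumption rather than a quote from the
 * EEH core: the daemon is expected to be started once during boot,
 * before any event can be queued, e.g.:
 *
 *	ret = eeh_event_init();
 *	if (ret)
 *		return ret;	// no eehd, so recovery cannot run
 */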

/**
 * __eeh_send_failure_event - Generate a PCI error event
 * @pe: EEH PE
 *
 * This routine can be called within an interrupt context;
 * the actual event will be delivered in a normal context
 * (from the eehd kernel thread).
 */
int __eeh_send_failure_event(struct eeh_pe *pe)
{
	unsigned long flags;
	struct eeh_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("EEH: out of memory, event not handled\n");
		return -ENOMEM;
	}
	event->pe = pe;

	/*
	 * Mark the PE as recovering before inserting it in the queue.
	 * This prevents the PE from being free()ed by a hotplug driver
	 * while the PE is sitting in the event queue.
	 */
	if (pe) {
#ifdef CONFIG_STACKTRACE
		/*
		 * Save the current stack trace so we can dump it from the
		 * event handler thread.
		 */
		pe->trace_entries = stack_trace_save(pe->stack_trace,
						     ARRAY_SIZE(pe->stack_trace), 0);
#endif /* CONFIG_STACKTRACE */

		eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
	}

	/* We may or may not be called in an interrupt context */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_add(&event->list, &eeh_eventlist);
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

	/* For the EEH daemon to kick in */
	complete(&eeh_eventlist_event);

	return 0;
}

int eeh_send_failure_event(struct eeh_pe *pe)
{
	/*
	 * If we've manually suppressed recovery events via debugfs
	 * then just drop it on the floor.
	 */
	if (eeh_debugfs_no_recover) {
		pr_err("EEH: Event dropped due to no_recover setting\n");
		return 0;
	}

	return __eeh_send_failure_event(pe);
}
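
/*
 * Illustrative callers (hypothetical, not taken from a real platform):
 * an error for which no single PE can be blamed, e.g. a dead IOC, is
 * queued as a "special" event by passing a NULL PE, which
 * eeh_event_handler() routes to eeh_handle_special_event():
 *
 *	if (ioc_is_dead)
 *		eeh_send_failure_event(NULL);	// special event
 *	else
 *		eeh_send_failure_event(pe);	// normal per-PE event
 *
 * Note that eeh_send_failure_event() drops events while the debugfs
 * no_recover switch is set; __eeh_send_failure_event() bypasses that
 * check.
 */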

/**
 * eeh_remove_event - Remove EEH event from the queue
 * @pe: PE whose queued events should be removed (NULL for all)
 * @force: Remove events even if their PE has been isolated
 *
 * On the PowerNV platform, later events may really be part of an
 * earlier one. In that case the later events are duplicates and
 * unnecessary, so they should be removed from the queue.
 */
void eeh_remove_event(struct eeh_pe *pe, bool force)
{
	unsigned long flags;
	struct eeh_event *event, *tmp;

	/*
	 * If a NULL PE is passed in, either the IOC is dead or the
	 * caller is sure it can report all existing errors itself.
	 *
	 * Unless "force" is set, events whose PE has already been
	 * isolated are left on the queue so that they are not lost.
	 */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
		if (!force && event->pe &&
		    (event->pe->state & EEH_PE_ISOLATED))
			continue;

		if (!pe) {
			list_del(&event->list);
			kfree(event);
		} else if (pe->type & EEH_PE_PHB) {
			if (event->pe && event->pe->phb == pe->phb) {
				list_del(&event->list);
				kfree(event);
			}
		} else if (event->pe == pe) {
			list_del(&event->list);
			kfree(event);
		}
	}
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
}
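
/*
 * Illustrative use of the @force flag (a hypothetical call site,
 * where phb_pe is a PE of type EEH_PE_PHB):
 *
 *	eeh_remove_event(phb_pe, true);	// purge the PHB's events,
 *					// even for isolated PEs
 *
 *	eeh_remove_event(NULL, false);	// drop all queued events except
 *					// those whose PE is isolated
 *
 * Without @force, an event whose PE is already marked EEH_PE_ISOLATED
 * is kept on the queue so that its pending recovery is not lost.
 */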