// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>

/** Overview:
 * EEH error states may be detected within exception handlers;
 * however, the recovery processing needs to occur asynchronously
 * in a normal kernel context and not an interrupt context.
 * This pair of routines creates an event and queues it onto a
 * list, where the eehd kernel thread can drive recovery.
 */

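/* The pending-event list, its lock, and the completion used to wake eehd */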
static DEFINE_SPINLOCK(eeh_eventlist_lock);
static DECLARE_COMPLETION(eeh_eventlist_event);
static LIST_HEAD(eeh_eventlist);

/**
 * eeh_event_handler - Dispatch EEH events.
 * @dummy: unused
 *
 * The detection of a frozen slot can occur inside an interrupt,
 * where it can be hard to do anything about it. The goal of this
 * routine is to pull these detection events out of the context
 * of the interrupt handler, and re-dispatch them for processing
 * at a later time in a normal context.
 */
static int eeh_event_handler(void *dummy)
{
	unsigned long flags;
	struct eeh_event *event;

	while (!kthread_should_stop()) {
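		/* Sleep until an event is queued; stop if interrupted by a signal */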
		if (wait_for_completion_interruptible(&eeh_eventlist_event))
			break;

		/* Fetch an EEH event from the queue */
		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		event = NULL;
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next,
					   struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		if (!event)
			continue;

		/* We might have an event without a binding PE */
		if (event->pe)
			eeh_handle_normal_event(event->pe);
		else
			eeh_handle_special_event();

		kfree(event);
	}

	return 0;
}

/**
 * eeh_event_init - Start kernel thread to handle EEH events
 *
 * This routine is called to start the kernel thread for processing
 * EEH events.
 */
int eeh_event_init(void)
{
	struct task_struct *t;
	int ret = 0;

	t = kthread_run(eeh_event_handler, NULL, "eehd");
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		pr_err("%s: Failed to start EEH daemon (%d)\n",
		       __func__, ret);
		return ret;
	}

	return 0;
}

/**
 * eeh_send_failure_event - Generate a PCI error event
 * @pe: EEH PE
 *
 * This routine can be called within an interrupt context;
 * the actual event will be delivered in a normal context
 * (by the eehd kernel thread).
 */
int __eeh_send_failure_event(struct eeh_pe *pe)
{
	unsigned long flags;
	struct eeh_event *event;

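	/* Allocate atomically: we may be called from an interrupt context */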
	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("EEH: out of memory, event not handled\n");
		return -ENOMEM;
	}
	event->pe = pe;

	/*
	 * Mark the PE as recovering before inserting it in the queue.
	 * This prevents the PE from being free()ed by a hotplug driver
	 * while the PE is sitting in the event queue.
	 */
	if (pe) {
#ifdef CONFIG_STACKTRACE
		/*
		 * Save the current stack trace so we can dump it from the
		 * event handler thread.
		 */
		pe->trace_entries = stack_trace_save(pe->stack_trace,
					ARRAY_SIZE(pe->stack_trace), 0);
#endif /* CONFIG_STACKTRACE */

		eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
	}

	/* We may or may not be called in an interrupt context */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_add(&event->list, &eeh_eventlist);
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

	/* Wake up the EEH daemon to process the event */
	complete(&eeh_eventlist_event);

	return 0;
}

int eeh_send_failure_event(struct eeh_pe *pe)
{
	/*
	 * If we've manually suppressed recovery events via debugfs
	 * then just drop the event on the floor.
	 */
	if (eeh_debugfs_no_recover) {
		pr_err("EEH: Event dropped due to no_recover setting\n");
		return 0;
	}

	return __eeh_send_failure_event(pe);
}

/**
 * eeh_remove_event - Remove EEH event from the queue
 * @pe: Event binding to the PE
 * @force: Event will be removed unconditionally
 *
 * On the PowerNV platform, subsequent events may be part of an
 * earlier one that is already being handled. In that case the
 * later events are duplicates and unnecessary, so they should be
 * removed from the queue.
 */
void eeh_remove_event(struct eeh_pe *pe, bool force)
{
	unsigned long flags;
	struct eeh_event *event, *tmp;

	/*
	 * If a NULL PE is passed in, either the IOC is dead or the
	 * caller is sure it can report all existing errors itself.
	 *
	 * Unless "force" is set, events whose associated PE has
	 * already been isolated are kept on the queue so that no
	 * event is lost.
	 */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
		if (!force && event->pe &&
		    (event->pe->state & EEH_PE_ISOLATED))
			continue;

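		/*
		 * Remove all events for a NULL PE, every event on the
		 * same PHB for a PHB PE, or only this PE's own events
		 * otherwise.
		 */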
		if (!pe) {
			list_del(&event->list);
			kfree(event);
		} else if (pe->type & EEH_PE_PHB) {
			if (event->pe && event->pe->phb == pe->phb) {
				list_del(&event->list);
				kfree(event);
			}
		} else if (event->pe == pe) {
			list_del(&event->list);
			kfree(event);
		}
	}
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
}
201}