// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt

#include <linux/refcount.h>
#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/cc_platform.h>

#include "amd_iommu.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");

#define PRI_QUEUE_SIZE		512

struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	refcount_t count;			/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	u32 pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
	struct list_head list;
	u32 sbdf;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	struct mm_struct *mm;
	u64 address;
	u32 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static DEFINE_SPINLOCK(state_lock);

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);

static struct device_state *__get_device_state(u32 sbdf)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->sbdf == sbdf)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u32 sbdf)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(sbdf);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	struct iommu_group *group;

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	/*
	 * Wait until the last reference is dropped before freeing
	 * the device state.
	 */
	wait_event(dev_state->wq, !atomic_read(&dev_state->count));

	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	group = iommu_group_get(&dev_state->pdev->dev);
	if (WARN_ON(!group))
		return;

	iommu_detach_group(dev_state->domain, group);

	iommu_group_put(group);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

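/*
 * pasid_state pointers are kept in a radix-tree-like table: each level is
 * one zeroed page holding 512 pointers indexed by 9 bits of the PASID,
 * with dev_state->pasid_levels selecting the depth.
 */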
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  u32 pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u32 pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, u32 pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   u32 pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		refcount_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (refcount_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	refcount_dec(&pasid_state->count);
	wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
	free_pasid_state(pasid_state);
}

static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid, no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else
		BUG_ON(dev_state->pasid_levels != 0);

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}

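/*
 * mmu_notifier callback: keep the IOMMU TLB in sync with CPU page-table
 * changes - flush a single page when the invalidated range fits in one
 * page, otherwise flush the whole TLB for this PASID.
 */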
static void mn_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	if ((start ^ (end - 1)) < PAGE_SIZE)
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state    = mn_to_state(mn);
	dev_state      = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}

static const struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.invalidate_range	= mn_invalidate_range,
};

static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		break;
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
		break;
	default:
		BUG();
	}
}

static bool access_error(struct vm_area_struct *vma, struct fault *fault)
{
	unsigned long requested = 0;

	if (fault->flags & PPR_FAULT_EXEC)
		requested |= VM_EXEC;

	if (fault->flags & PPR_FAULT_READ)
		requested |= VM_READ;

	if (fault->flags & PPR_FAULT_WRITE)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct vm_area_struct *vma;
	vm_fault_t ret = VM_FAULT_ERROR;
	unsigned int flags = 0;
	struct mm_struct *mm;
	u64 address;

	mm = fault->state->mm;
	address = fault->address;

	if (fault->flags & PPR_FAULT_USER)
		flags |= FAULT_FLAG_USER;
	if (fault->flags & PPR_FAULT_WRITE)
		flags |= FAULT_FLAG_WRITE;
	flags |= FAULT_FLAG_REMOTE;

	mmap_read_lock(mm);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		/* failed to get a vma in the right range */
		goto out;

	/* Check if we have the right permissions on the vma */
	if (access_error(vma, fault))
		goto out;

	ret = handle_mm_fault(vma, address, flags, NULL);
out:
	mmap_read_unlock(mm);

	if (ret & VM_FAULT_ERROR)
		/* failed to service fault */
		handle_fault_error(fault);

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}

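/*
 * Handle a PPR (Peripheral Page Request) event delivered by the IOMMU
 * driver's notifier chain: look up the device and PASID state and queue
 * the fault to iommu_wq so it can be handled in process context.
 */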
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct pci_dev *pdev = NULL;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag, devid, seg_id;
	int ret;

	iommu_fault = data;
	tag         = iommu_fault->tag & 0x1ff;
	finish      = (iommu_fault->tag >> 9) & 1;

	seg_id = PCI_SBDF_TO_SEGID(iommu_fault->sbdf);
	devid = PCI_SBDF_TO_DEVID(iommu_fault->sbdf);
	pdev = pci_get_domain_bus_and_slot(seg_id, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (!pdev)
		return -ENODEV;

	ret = NOTIFY_DONE;

	/* In kdump kernel pci dev is not initialized yet -> send INVALID */
	if (amd_iommu_is_attach_deferred(&pdev->dev)) {
		amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out;
	}

	dev_state = get_device_state(iommu_fault->sbdf);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:

	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	pci_dev_put(pdev);
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};

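/*
 * Bind @pasid on @pdev to the address space of @task: register an
 * mmu_notifier on the task's mm and program the mm's page-table root
 * into the GCR3 table so device faults on this PASID can be handled
 * against that mm via PRI.
 */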
int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u32 sbdf;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	sbdf      = get_pci_sbdf_id(pdev);
	dev_state = get_device_state(sbdf);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	refcount_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm                        = get_task_mm(task);
	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->invalid      = true; /* Mark as valid only if we are
					     done with setting up the pasid */
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	ret = mmu_notifier_register(&pasid_state->mn, mm);
	if (ret)
		goto out_free;

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);
	mmput(mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u32 sbdf;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	sbdf      = get_pci_sbdf_id(pdev);
	dev_state = get_device_state(sbdf);
	if (dev_state == NULL)
		return;

	if (pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

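/*
 * Set up per-device IOMMUv2 state: allocate the pasid_state table and a
 * direct-mapped v2 domain, enable PASID support on it, and attach the
 * device's IOMMU group to that domain.
 */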
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	struct iommu_group *group;
	unsigned long flags;
	int ret, tmp;
	u32 sbdf;

	might_sleep();

	/*
	 * When memory encryption is active the device is likely not in a
	 * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
	 */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return -ENODEV;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	sbdf = get_pci_sbdf_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->sbdf = sbdf;

	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	/* See iommu_is_default_domain() */
	dev_state->domain->type = IOMMU_DOMAIN_IDENTITY;
	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	group = iommu_group_get(&pdev->dev);
	if (!group) {
		ret = -EINVAL;
		goto out_free_domain;
	}

	ret = iommu_attach_group(dev_state->domain, group);
	if (ret != 0)
		goto out_drop_group;

	iommu_group_put(group);

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(sbdf) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_drop_group:
	iommu_group_put(group);

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u32 sbdf;

	if (!amd_iommu_v2_supported())
		return;

	sbdf = get_pci_sbdf_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(sbdf);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	put_device_state(dev_state);
	free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);

int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u32 sbdf;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	sbdf = get_pci_sbdf_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(sbdf);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u32 sbdf;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	sbdf = get_pci_sbdf_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(sbdf);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

static int __init amd_iommu_v2_init(void)
{
	int ret;

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n");
		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	ret = -ENOMEM;
	iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
	if (iommu_wq == NULL)
		goto out;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	pr_info("AMD IOMMUv2 loaded and initialized\n");

	return 0;

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state, *next;
	unsigned long flags;
	LIST_HEAD(freelist);

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	spin_lock_irqsave(&state_lock, flags);

	list_for_each_entry_safe(dev_state, next, &state_list, list) {
		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		list_del(&dev_state->list);
		list_add_tail(&dev_state->list, &freelist);
	}

	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * Since free_device_state waits on the count to be zero,
	 * we need to free dev_state outside the spinlock.
	 */
	list_for_each_entry_safe(dev_state, next, &freelist, list) {
		list_del(&dev_state->list);
		free_device_state(dev_state);
	}

	destroy_workqueue(iommu_wq);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);