blob: f5cedf816be183c1a6cd98dc102a65b3e7f7124a [file] [log] [blame]
Thomas Gleixnere62d9492019-05-20 19:07:58 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Shannon Nelson75896702007-10-16 01:27:41 -07002/*
Maciej Sosnowski211a22c2009-02-26 11:05:43 +01003 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
Shannon Nelson75896702007-10-16 01:27:41 -07004 */
5
6/*
7 * This driver supports an interface for DCA clients and providers to meet.
8 */
9
10#include <linux/kernel.h>
11#include <linux/notifier.h>
12#include <linux/device.h>
13#include <linux/dca.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090014#include <linux/slab.h>
Paul Gortmakerd2298072011-07-03 13:37:11 -040015#include <linux/module.h>
Shannon Nelson75896702007-10-16 01:27:41 -070016
Maciej Sosnowski1a5aeee2009-09-10 15:05:58 +020017#define DCA_VERSION "1.12.1"
Shannon Nelson75896702007-10-16 01:27:41 -070018
Maciej Sosnowski7f1b3582008-07-22 17:30:57 -070019MODULE_VERSION(DCA_VERSION);
Jeff Johnsone9d053f2024-06-15 14:44:08 -070020MODULE_DESCRIPTION("Intel Direct Cache Access (DCA) service module");
Maciej Sosnowski7f1b3582008-07-22 17:30:57 -070021MODULE_LICENSE("GPL");
22MODULE_AUTHOR("Intel Corporation");
Shannon Nelson75896702007-10-16 01:27:41 -070023
Mike Galbraitha1741e72010-07-07 10:29:01 +020024static DEFINE_RAW_SPINLOCK(dca_lock);
Shannon Nelson75896702007-10-16 01:27:41 -070025
Maciej Sosnowski1a5aeee2009-09-10 15:05:58 +020026static LIST_HEAD(dca_domains);
27
Sosnowski, Maciej4e8cec22010-09-16 06:02:26 +000028static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
29
30static int dca_providers_blocked;
31
Maciej Sosnowski1a5aeee2009-09-10 15:05:58 +020032static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
33{
34 struct pci_dev *pdev = to_pci_dev(dev);
35 struct pci_bus *bus = pdev->bus;
36
37 while (bus->parent)
38 bus = bus->parent;
39
40 return bus;
41}
42
43static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
44{
45 struct dca_domain *domain;
46
47 domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
48 if (!domain)
49 return NULL;
50
51 INIT_LIST_HEAD(&domain->dca_providers);
52 domain->pci_rc = rc;
53
54 return domain;
55}
56
/*
 * Unlink @domain from the dca_domains list and free it.
 * Callers (unregister paths) invoke this with dca_lock held.
 */
static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}
62
Sosnowski, Maciej4e8cec22010-09-16 06:02:26 +000063static int dca_provider_ioat_ver_3_0(struct device *dev)
64{
65 struct pci_dev *pdev = to_pci_dev(dev);
66
67 return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
68 ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
69 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
70 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
71 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
72 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
73 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
74 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
75 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
76}
77
/*
 * Tear down every provider in the (single expected) remaining domain.
 *
 * Providers are first moved to a private list under dca_lock, then the
 * sysfs teardown runs outside the lock — dca_sysfs_remove_provider()
 * must not be called with the raw spinlock held.
 */
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	/* Tell clients the providers are going away before removal. */
	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	/* Detach all providers onto the local list while locked. */
	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* Sysfs removal happens outside dca_lock. */
	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}
112
/*
 * Look up the domain whose root complex is @rc.
 * Returns NULL if no matching domain exists.
 * Caller must hold dca_lock (the list is not otherwise protected).
 */
static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	list_for_each_entry(domain, &dca_domains, node)
		if (domain->pci_rc == rc)
			return domain;

	return NULL;
}
123
/*
 * Find the domain for @dev's root complex.
 *
 * Returns the existing domain, or NULL if there is none.  Side effect:
 * if no domain exists, @dev is an I/OAT 3.0 provider, and other domains
 * already exist, dca_providers_blocked is set — the caller
 * (register_dca_provider()) then rejects and tears down providers,
 * since mixed-root-complex operation is being disallowed here.
 * Caller must hold dca_lock.
 */
static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
			dca_providers_blocked = 1;
	}

	return domain;
}
Maciej Sosnowski7f1b3582008-07-22 17:30:57 -0700139
/*
 * Find the provider managing @dev.
 *
 * With @dev == NULL (legacy API path), the first provider of the first
 * domain is returned.  Otherwise the provider in @dev's root-complex
 * domain whose ->dev_managed() claims the device is returned.
 * Returns NULL when no provider matches.  Caller must hold dca_lock.
 */
static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
	struct dca_provider *dca;
	struct pci_bus *rc;
	struct dca_domain *domain;

	if (dev) {
		rc = dca_pci_rc_from_dev(dev);
		domain = dca_find_domain(rc);
		if (!domain)
			return NULL;
	} else {
		if (!list_empty(&dca_domains))
			domain = list_first_entry(&dca_domains,
						  struct dca_domain,
						  node);
		else
			return NULL;
	}

	list_for_each_entry(dca, &domain->dca_providers, node)
		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
			return dca;

	return NULL;
}
Shannon Nelson75896702007-10-16 01:27:41 -0700166
/**
 * dca_add_requester - add a dca client to the list
 * @dev: the device that wants dca service
 *
 * Finds a provider in @dev's root-complex domain willing to accept the
 * requester, then creates the sysfs entry for the assigned slot.
 * Returns 0 on success, -EFAULT for a NULL @dev, -EEXIST if already
 * registered, -ENODEV if no domain exists, or a negative error from
 * the provider / sysfs registration.
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	/* Offer the requester to each provider until one takes it. */
	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	/* sysfs registration runs unlocked; roll back on failure. */
	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Re-check the provider still owns @dev before undoing —
		 * it may have been unregistered while we were unlocked. */
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
221
/**
 * dca_remove_requester - remove a dca client from the list
 * @dev: the device that wants dca service
 *
 * Returns 0 on success, -EFAULT for a NULL @dev, -ENODEV when no
 * provider manages @dev, or a negative error from the provider's
 * ->remove_requester().
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	/* sysfs removal happens outside dca_lock. */
	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);
252
/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev: the device that wants dca service
 * @cpu: the cpuid as returned by get_cpu()
 *
 * NOTE(review): the return type is u8, so the -ENODEV returned when no
 * provider is found is truncated to an 8-bit value — callers cannot
 * reliably distinguish it from a valid tag.  Long-standing API quirk;
 * left as is to preserve the exported interface.
 */
static u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}
276
/**
 * dca3_get_tag - return the dca tag to the requester device
 *	for the given cpu (new api)
 * @dev: the device that wants dca service
 * @cpu: the cpuid as returned by get_cpu()
 *
 * As with dca_common_get_tag(), error codes are truncated through the
 * u8 return type.
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);
291
/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu: the cpuid as returned by get_cpu()
 *
 * Legacy interface: passes a NULL device, so the first provider of the
 * first domain is consulted (see dca_find_provider_by_dev()).
 */
u8 dca_get_tag(int cpu)
{
	return dca_common_get_tag(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);
301
302/**
303 * alloc_dca_provider - get data struct for describing a dca provider
304 * @ops - pointer to struct of dca operation function pointers
305 * @priv_size - size of extra mem to be added for provider's needs
306 */
Julia Lawall2bb129e2015-11-13 12:46:00 +0100307struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
308 int priv_size)
Shannon Nelson75896702007-10-16 01:27:41 -0700309{
310 struct dca_provider *dca;
311 int alloc_size;
312
313 alloc_size = (sizeof(*dca) + priv_size);
314 dca = kzalloc(alloc_size, GFP_KERNEL);
315 if (!dca)
316 return NULL;
317 dca->ops = ops;
318
319 return dca;
320}
321EXPORT_SYMBOL_GPL(alloc_dca_provider);
322
/**
 * free_dca_provider - release the dca provider data struct
 * @dca: struct created by alloc_dca_provider()
 *
 * (Previous kernel-doc wrongly documented @ops/@priv_size, which are
 * not parameters of this function.)
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
333
Shannon Nelson75896702007-10-16 01:27:41 -0700334/**
335 * register_dca_provider - register a dca provider
336 * @dca - struct created by alloc_dca_provider()
337 * @dev - device providing dca services
338 */
339int register_dca_provider(struct dca_provider *dca, struct device *dev)
340{
341 int err;
Maciej Sosnowskieb4400e2009-02-02 23:26:57 -0800342 unsigned long flags;
Mike Galbraitha1741e72010-07-07 10:29:01 +0200343 struct dca_domain *domain, *newdomain = NULL;
Shannon Nelson75896702007-10-16 01:27:41 -0700344
Mike Galbraitha1741e72010-07-07 10:29:01 +0200345 raw_spin_lock_irqsave(&dca_lock, flags);
Sosnowski, Maciej4e8cec22010-09-16 06:02:26 +0000346 if (dca_providers_blocked) {
Mike Galbraitha1741e72010-07-07 10:29:01 +0200347 raw_spin_unlock_irqrestore(&dca_lock, flags);
Sosnowski, Maciej4e8cec22010-09-16 06:02:26 +0000348 return -ENODEV;
349 }
Mike Galbraitha1741e72010-07-07 10:29:01 +0200350 raw_spin_unlock_irqrestore(&dca_lock, flags);
Sosnowski, Maciej4e8cec22010-09-16 06:02:26 +0000351
Shannon Nelson75896702007-10-16 01:27:41 -0700352 err = dca_sysfs_add_provider(dca, dev);
353 if (err)
354 return err;
Maciej Sosnowskieb4400e2009-02-02 23:26:57 -0800355
Mike Galbraitha1741e72010-07-07 10:29:01 +0200356 raw_spin_lock_irqsave(&dca_lock, flags);
Maciej Sosnowski1a5aeee2009-09-10 15:05:58 +0200357 domain = dca_get_domain(dev);
358 if (!domain) {
Mike Galbraitha1741e72010-07-07 10:29:01 +0200359 struct pci_bus *rc;
360
Sosnowski, Maciej4e8cec22010-09-16 06:02:26 +0000361 if (dca_providers_blocked) {
Mike Galbraitha1741e72010-07-07 10:29:01 +0200362 raw_spin_unlock_irqrestore(&dca_lock, flags);
Sosnowski, Maciej4e8cec22010-09-16 06:02:26 +0000363 dca_sysfs_remove_provider(dca);
364 unregister_dca_providers();
Mike Galbraitha1741e72010-07-07 10:29:01 +0200365 return -ENODEV;
Sosnowski, Maciej4e8cec22010-09-16 06:02:26 +0000366 }
Mike Galbraitha1741e72010-07-07 10:29:01 +0200367
368 raw_spin_unlock_irqrestore(&dca_lock, flags);
369 rc = dca_pci_rc_from_dev(dev);
370 newdomain = dca_allocate_domain(rc);
371 if (!newdomain)
372 return -ENODEV;
373 raw_spin_lock_irqsave(&dca_lock, flags);
374 /* Recheck, we might have raced after dropping the lock */
375 domain = dca_get_domain(dev);
376 if (!domain) {
377 domain = newdomain;
378 newdomain = NULL;
379 list_add(&domain->node, &dca_domains);
380 }
Maciej Sosnowski1a5aeee2009-09-10 15:05:58 +0200381 }
382 list_add(&dca->node, &domain->dca_providers);
Mike Galbraitha1741e72010-07-07 10:29:01 +0200383 raw_spin_unlock_irqrestore(&dca_lock, flags);
Maciej Sosnowskieb4400e2009-02-02 23:26:57 -0800384
Shannon Nelson75896702007-10-16 01:27:41 -0700385 blocking_notifier_call_chain(&dca_provider_chain,
386 DCA_PROVIDER_ADD, NULL);
Mike Galbraitha1741e72010-07-07 10:29:01 +0200387 kfree(newdomain);
Shannon Nelson75896702007-10-16 01:27:41 -0700388 return 0;
389}
390EXPORT_SYMBOL_GPL(register_dca_provider);
391
/**
 * unregister_dca_provider - remove a dca provider
 * @dca: struct created by alloc_dca_provider()
 * @dev: device that was providing dca services
 *
 * Notifies clients, unlinks @dca from its domain (freeing the domain
 * when it becomes empty), and removes the sysfs entry.
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* Nothing to do if unregister_dca_providers() already ran. */
	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	list_del(&dca->node);

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* sysfs removal happens outside dca_lock. */
	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);
424
/**
 * dca_register_notify - register a client's notifier callback
 * @nb: notifier block receiving DCA_PROVIDER_ADD/REMOVE events
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);
433
/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb: notifier block previously passed to dca_register_notify()
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
442
/* Module init: announce the service and create the dca sysfs class. */
static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}
448
/* Module exit: tear down the dca sysfs class. */
static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}
453
Dan Williams652afc22009-01-06 11:38:22 -0700454arch_initcall(dca_init);
Shannon Nelson75896702007-10-16 01:27:41 -0700455module_exit(dca_exit);
456